/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2019 Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "dwarf2loc.h"
#include "common/underlying.h"

/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};

/* Allocate and fill in dwarf_gdbarch_types for an arch.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

struct type *
dwarf_expr_context::address_type () const
{
  struct dwarf_gdbarch_types *types
    = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
                                                   dwarf_arch_cookie);
  int ndx;

  if (this->addr_size == 2)
    ndx = 0;
  else if (this->addr_size == 4)
    ndx = 1;
  else if (this->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * this->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (this->gdbarch,
                           8 * this->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator.  */

dwarf_expr_context::dwarf_expr_context ()
: gdbarch (NULL),
  addr_size (0),
  ref_addr_size (0),
  offset (0),
  recursion_depth (0),
  max_recursion_depth (0x100),
  location (DWARF_VALUE_MEMORY),
  len (0),
  data (NULL),
  initialized (0)
{
}

/* Push VALUE onto the stack.  */

void
dwarf_expr_context::push (struct value *value, bool in_stack_memory)
{
  stack.emplace_back (value, in_stack_memory);
}

/* Push VALUE onto the stack.  */

void
dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
{
  push (value_from_ulongest (address_type (), value), in_stack_memory);
}

/* Pop the top item off of the stack.  */

void
dwarf_expr_context::pop ()
{
  if (stack.empty ())
    error (_("dwarf expression stack underflow"));

  stack.pop_back ();
}

/* Retrieve the N'th item on the stack.  */

struct value *
dwarf_expr_context::fetch (int n)
{
  if (stack.size () <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %zu elements on it."),
           n, stack.size ());
  return stack[stack.size () - (1 + n)].value;
}

/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}

/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Retrieve the N'th item on the stack, converted to an address.  */

CORE_ADDR
dwarf_expr_context::fetch_address (int n)
{
  struct value *result_val = fetch (n);
  enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (this->gdbarch))
    {
      gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
      struct type *int_type = get_unsigned_type (this->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, this->addr_size, byte_order, result);
      return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}

/* Retrieve the in_stack_memory flag of the N'th item on the stack.  */

bool
dwarf_expr_context::fetch_in_stack_memory (int n)
{
  if (stack.size () <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %zu elements on it."),
           n, stack.size ());
  return stack[stack.size () - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

bool
dwarf_expr_context::stack_empty_p () const
{
  return stack.empty ();
}

/* Add a new piece to the dwarf_expr_context's piece list.  */
void
dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
{
  this->pieces.emplace_back ();
  dwarf_expr_piece &p = this->pieces.back ();

  p.location = this->location;
  p.size = size;
  p.offset = offset;

  if (p.location == DWARF_VALUE_LITERAL)
    {
      p.v.literal.data = this->data;
      p.v.literal.length = this->len;
    }
  else if (stack_empty_p ())
    {
      p.location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator.  */
      this->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p.location == DWARF_VALUE_MEMORY)
    {
      p.v.mem.addr = fetch_address (0);
      p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
    }
  else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p.v.ptr.die_sect_off = (sect_offset) this->len;
      p.v.ptr.offset = value_as_long (fetch (0));
    }
  else if (p.location == DWARF_VALUE_REGISTER)
    p.v.regno = value_as_long (fetch (0));
  else
    {
      p.v.value = fetch (0);
    }
}

/* Evaluate the expression at ADDR (LEN bytes long).  */

void
dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
{
  int old_recursion_depth = this->recursion_depth;

  execute_stack_op (addr, addr + len);

  /* RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (this->recursion_depth == old_recursion_depth);
}

/* Helper to read a uleb128 value or throw an error.  */

const gdb_byte *
safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   uint64_t *r)
{
  buf = gdb_read_uleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
  return buf;
}

/* Helper to read a sleb128 value or throw an error.  */

const gdb_byte *
safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   int64_t *r)
{
  buf = gdb_read_sleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
  return buf;
}

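/* Helper to skip past a leb128 value or throw an error.  */
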
const gdb_byte *
safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
{
  buf = gdb_skip_leb128 (buf, buf_end);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
  return buf;
}

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator or by
   DW_OP_GNU_uninit (which should terminate the expression).  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
      && *op_ptr != DW_OP_GNU_uninit)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}

/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
        return -1;
      return *buf - DW_OP_reg0;
    }
  if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
        return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
    }
  else
    return -1;
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}

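/* For example, a one-byte block containing just DW_OP_reg3 (0x53)
   yields DWARF register number 3, and a block of { DW_OP_regx, 0x20 }
   yields register 32.  */
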
/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
                                CORE_ADDR *deref_size_return)
{
  uint64_t dwarf_reg;
  int64_t offset;

  if (buf_end <= buf)
    return -1;

  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
      if (buf >= buf_end)
        return -1;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      if ((int) dwarf_reg != dwarf_reg)
        return -1;
    }
  else
    return -1;

  buf = gdb_read_sleb128 (buf, buf_end, &offset);
  if (buf == NULL)
    return -1;
  if (offset != 0)
    return -1;

  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
        return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
                          CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}

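/* For example, the two-byte block { DW_OP_fbreg, 0x70 } (0x70 is the
   sleb128 encoding of -16) stores -16 in *FB_OFFSET_RETURN and
   returns 1.  */
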
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
                          const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
        return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return 0;
    }

  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}

/* The engine for the expression evaluator.  Using the context in this
   object, evaluate the expression between OP_PTR and OP_END.  */

void
dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
                                      const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
  struct type *address_type = this->address_type ();

  this->location = DWARF_VALUE_MEMORY;
  this->initialized = 1;  /* Default is initialized.  */

  if (this->recursion_depth > this->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           this->recursion_depth);
  this->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to true.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as false.  */
      bool in_stack_memory = false;
      uint64_t uoffset, reg;
      int64_t offset;
      struct value *result_val = NULL;

      /* The DWARF expression might have a bug causing an infinite
         loop.  In that case, quitting is the only way out.  */
      QUIT;

      switch (op)
        {
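        /* DW_OP_lit0 through DW_OP_lit31 push the literal values 0
           through 31 onto the stack.  */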
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             this->addr_size, byte_order);
          op_ptr += this->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op.  */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += this->offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addrx:
        case DW_OP_GNU_addr_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = this->get_addr_index (uoffset);
          result += this->offset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_GNU_const_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = this->get_addr_index (uoffset);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;

          /* The DW_OP_reg operations are required to occur alone in
             location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          this->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          this->location = DWARF_VALUE_REGISTER;
          break;

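        /* DW_OP_implicit_value: the value of the object is the block
           of bytes that follows the uleb128 length operand.  */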
        case DW_OP_implicit_value:
          {
            uint64_t len;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            this->len = len;
            this->data = op_ptr;
            this->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          this->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_implicit_pointer:
        case DW_OP_GNU_implicit_pointer:
          {
            int64_t len;

            if (this->ref_addr_size == -1)
              error (_("DWARF-2 expression error: DW_OP_implicit_pointer "
                       "is not allowed in frame context"));

            /* The referred-to DIE of sect_offset kind.  */
            this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
                                                  byte_order);
            op_ptr += this->ref_addr_size;

            /* The byte offset into the data.  */
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            this->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_pointer");
          }
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = this->read_addr_from_reg (op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = this->read_addr_from_reg (reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;

            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);

            /* Rather than create a whole new context, we simply
               backup the current stack locally and install a new empty stack,
               then reset it afterwards, effectively erasing whatever the
               recursive call put there.  */
            std::vector<dwarf_stack_value> saved_stack = std::move (stack);
            stack.clear ();

            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            this->get_frame_base (&datastart, &datalen);
            eval (datastart, datalen);
            if (this->location == DWARF_VALUE_MEMORY)
              result = fetch_address (0);
            else if (this->location == DWARF_VALUE_REGISTER)
              result = this->read_addr_from_reg (value_as_long (fetch (0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = true;

            /* Restore the content of the original stack.  */
            stack = std::move (saved_stack);

            this->location = DWARF_VALUE_MEMORY;
          }
          break;

        case DW_OP_dup:
          result_val = fetch (0);
          in_stack_memory = fetch_in_stack_memory (0);
          break;

        case DW_OP_drop:
          pop ();
          goto no_push;

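        /* DW_OP_pick's single byte operand is the index (from the top
           of the stack) of the entry to duplicate.  */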
        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = fetch (offset);
          in_stack_memory = fetch_in_stack_memory (offset);
          break;

        case DW_OP_swap:
          {
            if (stack.size () < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %zu."),
                     stack.size ());

            dwarf_stack_value &t1 = stack[stack.size () - 1];
            dwarf_stack_value &t2 = stack[stack.size () - 2];
            std::swap (t1, t2);
            goto no_push;
          }

        case DW_OP_over:
          result_val = fetch (1);
          in_stack_memory = fetch_in_stack_memory (1);
          break;

        case DW_OP_rot:
          {
            if (stack.size () < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %zu."),
                     stack.size ());

            dwarf_stack_value temp = stack[stack.size () - 1];
            stack[stack.size () - 1] = stack[stack.size () - 2];
            stack[stack.size () - 2] = stack[stack.size () - 3];
            stack[stack.size () - 3] = temp;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_deref_type:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
            gdb_byte *buf = (gdb_byte *) alloca (addr_size);
            CORE_ADDR addr = fetch_address (0);
            struct type *type;

            pop ();

            if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
              {
                op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
                cu_offset type_die_cu_off = (cu_offset) uoffset;
                type = get_base_type (type_die_cu_off, 0);
              }
            else
              type = address_type;

            this->read_mem (buf, addr, addr_size);

            /* If the size of the object read from memory is different
               from the type length, we need to zero-extend it.  */
            if (TYPE_LENGTH (type) != addr_size)
              {
                ULONGEST datum =
                  extract_unsigned_integer (buf, addr_size, byte_order);

                buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
                store_unsigned_integer (buf, TYPE_LENGTH (type),
                                        byte_order, datum);
              }

            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }

        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations.  */
            result_val = fetch (0);
            pop ();

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val), not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  */
            struct value *first, *second;

            second = fetch (0);
            pop ();

            first = fetch (0);
            pop ();

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math.  */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (this->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant.  */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *utype
                      = get_unsigned_type (this->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *stype
                      = get_signed_type (this->gdbarch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;

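        /* DW_OP_call_frame_cfa pushes the Canonical Frame Address as
           computed by the call frame information.  */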
        case DW_OP_call_frame_cfa:
          result = this->get_frame_cfa ();
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = true;
          break;

        case DW_OP_GNU_push_tls_address:
        case DW_OP_form_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned.  */
          result = value_as_long (fetch (0));
          pop ();
          result = this->get_tls_address (result);
          result_val = value_from_ulongest (address_type, result);
          break;

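        /* Control flow: DW_OP_skip branches unconditionally by a signed
           16-bit byte offset; DW_OP_bra pops the top of the stack and
           branches only if that value is non-zero.  */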
        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = fetch (0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            pop ();
          }
          goto no_push;

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            uint64_t size;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            add_piece (8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (this->location != DWARF_VALUE_LITERAL
                && this->location != DWARF_VALUE_OPTIMIZED_OUT)
              pop ();
            this->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_bit_piece:
          {
            uint64_t size, uleb_offset;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
            add_piece (size, uleb_offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (this->location != DWARF_VALUE_LITERAL
                && this->location != DWARF_VALUE_OPTIMIZED_OUT)
              pop ();
            this->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          this->initialized = 0;
          goto no_push;

        case DW_OP_call2:
          {
            cu_offset cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            this->dwarf_call (cu_off);
          }
          goto no_push;

        case DW_OP_call4:
          {
            cu_offset cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            this->dwarf_call (cu_off);
          }
          goto no_push;

        case DW_OP_GNU_variable_value:
          {
            sect_offset sect_off
              = (sect_offset) extract_unsigned_integer (op_ptr,
                                                        this->ref_addr_size,
                                                        byte_order);
            op_ptr += this->ref_addr_size;
            result_val = this->dwarf_variable_value (sect_off);
          }
          break;

        case DW_OP_entry_value:
        case DW_OP_GNU_entry_value:
          {
            uint64_t len;
            CORE_ADDR deref_size;
            union call_site_parameter_u kind_u;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_entry_value: too few bytes available."));

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
            if (kind_u.dwarf_reg != -1)
              {
                op_ptr += len;
                this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
                                                  kind_u,
                                                  -1 /* deref_size */);
                goto no_push;
              }

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
                                                               op_ptr + len,
                                                               &deref_size);
            if (kind_u.dwarf_reg != -1)
              {
                if (deref_size == -1)
                  deref_size = this->addr_size;
                op_ptr += len;
                this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
                                                  kind_u, deref_size);
                goto no_push;
              }

            error (_("DWARF-2 expression error: DW_OP_entry_value is "
                     "supported only for single DW_OP_reg* "
                     "or for DW_OP_breg*(0)+DW_OP_deref*"));
          }

        case DW_OP_GNU_parameter_ref:
          {
            union call_site_parameter_u kind_u;

            kind_u.param_cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
                                              kind_u,
                                              -1 /* deref_size */);
          }
          goto no_push;

        case DW_OP_const_type:
        case DW_OP_GNU_const_type:
          {
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = get_base_type (type_die_cu_off, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_regval_type:
        case DW_OP_GNU_regval_type:
          {
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            type = get_base_type (type_die_cu_off, 0);
            result_val = this->get_reg_value (type, reg);
          }
          break;

        case DW_OP_convert:
        case DW_OP_GNU_convert:
        case DW_OP_reinterpret:
        case DW_OP_GNU_reinterpret:
          {
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            if (to_underlying (type_die_cu_off) == 0)
              type = address_type;
            else
              type = get_base_type (type_die_cu_off, 0);

            result_val = fetch (0);
            pop ();

            if (op == DW_OP_convert || op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        case DW_OP_push_object_address:
          /* Return the address of the object we are currently observing.  */
          result = this->get_object_address ();
          result_val = value_from_ulongest (address_type, result);
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      push (result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (8 * this->addr_size, 0);

  this->recursion_depth--;
  gdb_assert (this->recursion_depth >= 0);
}

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}