/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2019 Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "symtab.h"
#include "gdbtypes.h"
#include "value.h"
#include "gdbcore.h"
#include "dwarf2.h"
#include "dwarf2expr.h"
#include "dwarf2loc.h"
#include "common/underlying.h"

/* Cookie for gdbarch data.  */

static struct gdbarch_data *dwarf_arch_cookie;

/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3];
};

/* Allocate and fill in dwarf_gdbarch_types for an arch.  */

static void *
dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
{
  struct dwarf_gdbarch_types *types
    = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);

  /* The types themselves are lazily initialized.  */

  return types;
}

/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

struct type *
dwarf_expr_context::address_type () const
{
  struct dwarf_gdbarch_types *types
    = (struct dwarf_gdbarch_types *) gdbarch_data (this->gdbarch,
                                                   dwarf_arch_cookie);
  int ndx;

  if (this->addr_size == 2)
    ndx = 0;
  else if (this->addr_size == 4)
    ndx = 1;
  else if (this->addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
           8 * this->addr_size);

  if (types->dw_types[ndx] == NULL)
    types->dw_types[ndx]
      = arch_integer_type (this->gdbarch,
                           8 * this->addr_size,
                           0, "<signed DWARF address type>");

  return types->dw_types[ndx];
}

/* Create a new context for the expression evaluator.  */

dwarf_expr_context::dwarf_expr_context ()
: gdbarch (NULL),
  addr_size (0),
  ref_addr_size (0),
  offset (0),
  recursion_depth (0),
  max_recursion_depth (0x100),
  location (DWARF_VALUE_MEMORY),
  len (0),
  data (NULL),
  initialized (0)
{
}

/* Push VALUE onto the stack.  */

void
dwarf_expr_context::push (struct value *value, bool in_stack_memory)
{
  stack.emplace_back (value, in_stack_memory);
}

/* Convert VALUE to an address-typed value and push it onto the stack.  */

void
dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
{
  push (value_from_ulongest (address_type (), value), in_stack_memory);
}

/* Pop the top item off of the stack.  */

void
dwarf_expr_context::pop ()
{
  if (stack.empty ())
    error (_("dwarf expression stack underflow"));

  stack.pop_back ();
}

/* Retrieve the N'th item on the stack.  */

struct value *
dwarf_expr_context::fetch (int n)
{
  if (stack.size () <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %zu elements on it."),
           n, stack.size ());
  return stack[stack.size () - (1 + n)].value;
}
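
/* Note the indexing convention used by fetch and fetch_in_stack_memory:
   position 0 is the top of the stack.  For example, if A is pushed and
   then B, fetch (0) returns B and fetch (1) returns A.  */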

/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (TYPE_CODE (type) != TYPE_CODE_INT
      && TYPE_CODE (type) != TYPE_CODE_CHAR
      && TYPE_CODE (type) != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}

/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (TYPE_LENGTH (type))
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
               "DWARF expression"));
    }
}

/* Retrieve the N'th item on the stack, converted to an address.  */

CORE_ADDR
dwarf_expr_context::fetch_address (int n)
{
  struct value *result_val = fetch (n);
  enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
  ULONGEST result;

  dwarf_require_integral (value_type (result_val));
  result = extract_unsigned_integer (value_contents (result_val),
                                     TYPE_LENGTH (value_type (result_val)),
                                     byte_order);

  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
  if (gdbarch_integer_to_address_p (this->gdbarch))
    {
      gdb_byte *buf = (gdb_byte *) alloca (this->addr_size);
      struct type *int_type = get_unsigned_type (this->gdbarch,
                                                 value_type (result_val));

      store_unsigned_integer (buf, this->addr_size, byte_order, result);
      return gdbarch_integer_to_address (this->gdbarch, int_type, buf);
    }

  return (CORE_ADDR) result;
}

/* Retrieve the in_stack_memory flag of the N'th item on the stack.  */

bool
dwarf_expr_context::fetch_in_stack_memory (int n)
{
  if (stack.size () <= n)
    error (_("Asked for position %d of stack, "
             "stack only has %zu elements on it."),
           n, stack.size ());
  return stack[stack.size () - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

bool
dwarf_expr_context::stack_empty_p () const
{
  return stack.empty ();
}

/* Add a new piece to the dwarf_expr_context's piece list.  */

void
dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
{
  this->pieces.emplace_back ();
  dwarf_expr_piece &p = this->pieces.back ();

  p.location = this->location;
  p.size = size;
  p.offset = offset;

  if (p.location == DWARF_VALUE_LITERAL)
    {
      p.v.literal.data = this->data;
      p.v.literal.length = this->len;
    }
  else if (stack_empty_p ())
    {
      p.location = DWARF_VALUE_OPTIMIZED_OUT;
      /* Also reset the context's location, for our callers.  This is
         a somewhat strange approach, but this lets us avoid setting
         the location to DWARF_VALUE_MEMORY in all the individual
         cases in the evaluator.  */
      this->location = DWARF_VALUE_OPTIMIZED_OUT;
    }
  else if (p.location == DWARF_VALUE_MEMORY)
    {
      p.v.mem.addr = fetch_address (0);
      p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
    }
  else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
    {
      p.v.ptr.die_sect_off = (sect_offset) this->len;
      p.v.ptr.offset = value_as_long (fetch (0));
    }
  else if (p.location == DWARF_VALUE_REGISTER)
    p.v.regno = value_as_long (fetch (0));
  else
    {
      p.v.value = fetch (0);
    }
}
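
/* For example, the composite location description

     DW_OP_reg3 DW_OP_piece 4 DW_OP_fbreg -8 DW_OP_piece 4

   describes an 8-byte object whose first four bytes live in DWARF
   register 3 and whose last four bytes live in memory at frame base - 8;
   evaluating it calls add_piece twice, first with the location set to
   DWARF_VALUE_REGISTER and then with DWARF_VALUE_MEMORY.  The SIZE
   argument is in bits; the DW_OP_piece handler in execute_stack_op
   multiplies the byte count from the operand by 8 before calling
   here.  */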

/* Evaluate the expression at ADDR (LEN bytes long).  */

void
dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
{
  int old_recursion_depth = this->recursion_depth;

  execute_stack_op (addr, addr + len);

  /* RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (this->recursion_depth == old_recursion_depth);
}

/* Helper to read a uleb128 value or throw an error.  */

const gdb_byte *
safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   uint64_t *r)
{
  buf = gdb_read_uleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
  return buf;
}

/* Helper to read a sleb128 value or throw an error.  */

const gdb_byte *
safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
                   int64_t *r)
{
  buf = gdb_read_sleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
  return buf;
}

/* Helper to skip over a leb128 value or throw an error.  */

const gdb_byte *
safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
{
  buf = gdb_skip_leb128 (buf, buf_end);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
  return buf;
}
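
/* LEB128 is the variable-length integer encoding used throughout DWARF:
   each byte holds 7 payload bits and the high bit marks a continuation.
   For example, the unsigned value 150 is encoded as the two bytes
   0x96 0x01, and the signed value -2 as the single byte 0x7e.  */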

/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator or by
   DW_OP_GNU_uninit (which should terminate the expression).  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
                                const char *op_name)
{
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
      && *op_ptr != DW_OP_GNU_uninit)
    error (_("DWARF-2 expression error: `%s' operations must be "
             "used either alone or in conjunction with DW_OP_piece "
             "or DW_OP_bit_piece."),
           op_name);
}
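
/* For example, "DW_OP_reg3 DW_OP_piece 4" and a lone "DW_OP_reg3" are
   both accepted, while "DW_OP_reg3 DW_OP_plus" is rejected here: a
   register location cannot be used as an operand of further
   computation.  */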

/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (TYPE_CODE (t1) != TYPE_CODE (t2))
    return 0;
  if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
    return 0;
  return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
}

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */

int
dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
{
  uint64_t dwarf_reg;

  if (buf_end <= buf)
    return -1;
  if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
    {
      if (buf_end - buf != 1)
        return -1;
      return *buf - DW_OP_reg0;
    }

  if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      buf = gdb_skip_leb128 (buf, buf_end);
      if (buf == NULL)
        return -1;
    }
  else if (*buf == DW_OP_regx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
    }
  else
    return -1;
  if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
    return -1;
  return dwarf_reg;
}
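
/* For example, the one-byte block { DW_OP_reg3 } and the block
   { DW_OP_regx <uleb128 3> } both yield 3, while a block containing any
   additional operations yields -1.  */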

/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */

int
dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
                                CORE_ADDR *deref_size_return)
{
  uint64_t dwarf_reg;
  int64_t offset;

  if (buf_end <= buf)
    return -1;

  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
      if (buf >= buf_end)
        return -1;
    }
  else if (*buf == DW_OP_bregx)
    {
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return -1;
      if ((int) dwarf_reg != dwarf_reg)
        return -1;
    }
  else
    return -1;

  buf = gdb_read_sleb128 (buf, buf_end, &offset);
  if (buf == NULL)
    return -1;
  if (offset != 0)
    return -1;

  if (*buf == DW_OP_deref)
    {
      buf++;
      *deref_size_return = -1;
    }
  else if (*buf == DW_OP_deref_size)
    {
      buf++;
      if (buf >= buf_end)
        return -1;
      *deref_size_return = *buf++;
    }
  else
    return -1;

  if (buf != buf_end)
    return -1;

  return dwarf_reg;
}
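
/* For example, { DW_OP_breg7 <sleb128 0>, DW_OP_deref } yields 7 with
   *DEREF_SIZE_RETURN set to -1, and
   { DW_OP_breg7 <sleb128 0>, DW_OP_deref_size 4 } yields 7 with
   *DEREF_SIZE_RETURN set to 4.  A non-zero register offset makes the
   block unrecognized, so -1 is returned.  */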

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */

int
dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
                          CORE_ADDR *fb_offset_return)
{
  int64_t fb_offset;

  if (buf_end <= buf)
    return 0;

  if (*buf != DW_OP_fbreg)
    return 0;
  buf++;

  buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
  if (buf == NULL)
    return 0;
  *fb_offset_return = fb_offset;
  if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
    return 0;

  return 1;
}
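
/* For example, the block { DW_OP_fbreg <sleb128 -16> } stores -16 in
   *FB_OFFSET_RETURN and returns 1; a block starting with any other
   operation returns 0.  */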

/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */

int
dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
                          const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
{
  uint64_t dwarf_reg;
  int64_t sp_offset;

  if (buf_end <= buf)
    return 0;
  if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
    {
      dwarf_reg = *buf - DW_OP_breg0;
      buf++;
    }
  else
    {
      if (*buf != DW_OP_bregx)
        return 0;
      buf++;
      buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
      if (buf == NULL)
        return 0;
    }

  if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
      != gdbarch_sp_regnum (gdbarch))
    return 0;

  buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
  if (buf == NULL)
    return 0;
  *sp_offset_return = sp_offset;
  if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
    return 0;

  return 1;
}
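
/* For example, on x86-64, where DWARF register 7 is the stack pointer,
   the block { DW_OP_breg7 <sleb128 16> } stores 16 in *SP_OFFSET_RETURN
   and returns 1, while a DW_OP_breg* naming any other register
   returns 0.  */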

/* The engine for the expression evaluator.  Using the context in this
   object, evaluate the expression between OP_PTR and OP_END.  */

void
dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
                                      const gdb_byte *op_end)
{
  enum bfd_endian byte_order = gdbarch_byte_order (this->gdbarch);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
  struct type *address_type = this->address_type ();

  this->location = DWARF_VALUE_MEMORY;
  this->initialized = 1;  /* Default is initialized.  */

  if (this->recursion_depth > this->max_recursion_depth)
    error (_("DWARF-2 expression error: Loop detected (%d)."),
           this->recursion_depth);
  this->recursion_depth++;

  while (op_ptr < op_end)
    {
      enum dwarf_location_atom op = (enum dwarf_location_atom) *op_ptr++;
      ULONGEST result;
      /* Assume the value is not in stack memory.
         Code that knows otherwise sets this to true.
         Some arithmetic on stack addresses can probably be assumed to still
         be a stack address, but we skip this complication for now.
         This is just an optimization, so it's always ok to punt
         and leave this as false.  */
      bool in_stack_memory = false;
      uint64_t uoffset, reg;
      int64_t offset;
      struct value *result_val = NULL;

      /* The DWARF expression might have a bug causing an infinite
         loop.  In that case, quitting is the only way out.  */
      QUIT;

      switch (op)
        {
        case DW_OP_lit0:
        case DW_OP_lit1:
        case DW_OP_lit2:
        case DW_OP_lit3:
        case DW_OP_lit4:
        case DW_OP_lit5:
        case DW_OP_lit6:
        case DW_OP_lit7:
        case DW_OP_lit8:
        case DW_OP_lit9:
        case DW_OP_lit10:
        case DW_OP_lit11:
        case DW_OP_lit12:
        case DW_OP_lit13:
        case DW_OP_lit14:
        case DW_OP_lit15:
        case DW_OP_lit16:
        case DW_OP_lit17:
        case DW_OP_lit18:
        case DW_OP_lit19:
        case DW_OP_lit20:
        case DW_OP_lit21:
        case DW_OP_lit22:
        case DW_OP_lit23:
        case DW_OP_lit24:
        case DW_OP_lit25:
        case DW_OP_lit26:
        case DW_OP_lit27:
        case DW_OP_lit28:
        case DW_OP_lit29:
        case DW_OP_lit30:
        case DW_OP_lit31:
          result = op - DW_OP_lit0;
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_addr:
          result = extract_unsigned_integer (op_ptr,
                                             this->addr_size, byte_order);
          op_ptr += this->addr_size;
          /* Some versions of GCC emit DW_OP_addr before
             DW_OP_GNU_push_tls_address.  In this case the value is an
             index, not an address.  We don't support things like
             branching between the address and the TLS op.  */
          if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
            result += this->offset;
          result_val = value_from_ulongest (address_type, result);
          break;
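
          /* For example, on a target with 8-byte addresses the 9-byte
             sequence "DW_OP_addr <8-byte constant>" pushes that constant,
             biased by this->offset to account for how far the objfile was
             relocated from its link-time address.  */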

        case DW_OP_GNU_addr_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = this->get_addr_index (uoffset);
          result += this->offset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_GNU_const_index:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = this->get_addr_index (uoffset);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_const1u:
          result = extract_unsigned_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const1s:
          result = extract_signed_integer (op_ptr, 1, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 1;
          break;
        case DW_OP_const2u:
          result = extract_unsigned_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const2s:
          result = extract_signed_integer (op_ptr, 2, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 2;
          break;
        case DW_OP_const4u:
          result = extract_unsigned_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const4s:
          result = extract_signed_integer (op_ptr, 4, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 4;
          break;
        case DW_OP_const8u:
          result = extract_unsigned_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_const8s:
          result = extract_signed_integer (op_ptr, 8, byte_order);
          result_val = value_from_ulongest (address_type, result);
          op_ptr += 8;
          break;
        case DW_OP_constu:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
          result = uoffset;
          result_val = value_from_ulongest (address_type, result);
          break;
        case DW_OP_consts:
          op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
          result = offset;
          result_val = value_from_ulongest (address_type, result);
          break;

        /* The DW_OP_reg operations are required to occur alone in
           location expressions.  */
        case DW_OP_reg0:
        case DW_OP_reg1:
        case DW_OP_reg2:
        case DW_OP_reg3:
        case DW_OP_reg4:
        case DW_OP_reg5:
        case DW_OP_reg6:
        case DW_OP_reg7:
        case DW_OP_reg8:
        case DW_OP_reg9:
        case DW_OP_reg10:
        case DW_OP_reg11:
        case DW_OP_reg12:
        case DW_OP_reg13:
        case DW_OP_reg14:
        case DW_OP_reg15:
        case DW_OP_reg16:
        case DW_OP_reg17:
        case DW_OP_reg18:
        case DW_OP_reg19:
        case DW_OP_reg20:
        case DW_OP_reg21:
        case DW_OP_reg22:
        case DW_OP_reg23:
        case DW_OP_reg24:
        case DW_OP_reg25:
        case DW_OP_reg26:
        case DW_OP_reg27:
        case DW_OP_reg28:
        case DW_OP_reg29:
        case DW_OP_reg30:
        case DW_OP_reg31:
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");

          result = op - DW_OP_reg0;
          result_val = value_from_ulongest (address_type, result);
          this->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_regx:
          op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");

          result = reg;
          result_val = value_from_ulongest (address_type, result);
          this->location = DWARF_VALUE_REGISTER;
          break;

        case DW_OP_implicit_value:
          {
            uint64_t len;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_implicit_value: too few bytes available."));
            this->len = len;
            this->data = op_ptr;
            this->location = DWARF_VALUE_LITERAL;
            op_ptr += len;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_value");
          }
          goto no_push;

        case DW_OP_stack_value:
          this->location = DWARF_VALUE_STACK;
          dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
          goto no_push;

        case DW_OP_implicit_pointer:
        case DW_OP_GNU_implicit_pointer:
          {
            int64_t len;

            if (this->ref_addr_size == -1)
              error (_("DWARF-2 expression error: DW_OP_implicit_pointer "
                       "is not allowed in frame context"));

            /* The referred-to DIE of sect_offset kind.  */
            this->len = extract_unsigned_integer (op_ptr, this->ref_addr_size,
                                                  byte_order);
            op_ptr += this->ref_addr_size;

            /* The byte offset into the data.  */
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
            result = (ULONGEST) len;
            result_val = value_from_ulongest (address_type, result);

            this->location = DWARF_VALUE_IMPLICIT_POINTER;
            dwarf_expr_require_composition (op_ptr, op_end,
                                            "DW_OP_implicit_pointer");
          }
          break;

        case DW_OP_breg0:
        case DW_OP_breg1:
        case DW_OP_breg2:
        case DW_OP_breg3:
        case DW_OP_breg4:
        case DW_OP_breg5:
        case DW_OP_breg6:
        case DW_OP_breg7:
        case DW_OP_breg8:
        case DW_OP_breg9:
        case DW_OP_breg10:
        case DW_OP_breg11:
        case DW_OP_breg12:
        case DW_OP_breg13:
        case DW_OP_breg14:
        case DW_OP_breg15:
        case DW_OP_breg16:
        case DW_OP_breg17:
        case DW_OP_breg18:
        case DW_OP_breg19:
        case DW_OP_breg20:
        case DW_OP_breg21:
        case DW_OP_breg22:
        case DW_OP_breg23:
        case DW_OP_breg24:
        case DW_OP_breg25:
        case DW_OP_breg26:
        case DW_OP_breg27:
        case DW_OP_breg28:
        case DW_OP_breg29:
        case DW_OP_breg30:
        case DW_OP_breg31:
          {
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = this->read_addr_from_reg (op - DW_OP_breg0);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_bregx:
          {
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
            result = this->read_addr_from_reg (reg);
            result += offset;
            result_val = value_from_ulongest (address_type, result);
          }
          break;
        case DW_OP_fbreg:
          {
            const gdb_byte *datastart;
            size_t datalen;

            op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);

            /* Rather than create a whole new context, we simply
               backup the current stack locally and install a new empty stack,
               then reset it afterwards, effectively erasing whatever the
               recursive call put there.  */
            std::vector<dwarf_stack_value> saved_stack = std::move (stack);
            stack.clear ();

            /* FIXME: cagney/2003-03-26: This code should be using
               get_frame_base_address(), and then implement a dwarf2
               specific this_base method.  */
            this->get_frame_base (&datastart, &datalen);
            eval (datastart, datalen);
            if (this->location == DWARF_VALUE_MEMORY)
              result = fetch_address (0);
            else if (this->location == DWARF_VALUE_REGISTER)
              result = this->read_addr_from_reg (value_as_long (fetch (0)));
            else
              error (_("Not implemented: computing frame "
                       "base using explicit value operator"));
            result = result + offset;
            result_val = value_from_ulongest (address_type, result);
            in_stack_memory = true;

            /* Restore the content of the original stack.  */
            stack = std::move (saved_stack);

            this->location = DWARF_VALUE_MEMORY;
          }
          break;
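
          /* For example, if the enclosing function's DW_AT_frame_base is
             "DW_OP_call_frame_cfa", then "DW_OP_fbreg -16" evaluates that
             sub-expression on a temporary stack to obtain the CFA and
             pushes CFA - 16 as a stack-memory address.  */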

        case DW_OP_dup:
          result_val = fetch (0);
          in_stack_memory = fetch_in_stack_memory (0);
          break;

        case DW_OP_drop:
          pop ();
          goto no_push;

        case DW_OP_pick:
          offset = *op_ptr++;
          result_val = fetch (offset);
          in_stack_memory = fetch_in_stack_memory (offset);
          break;

        case DW_OP_swap:
          {
            if (stack.size () < 2)
              error (_("Not enough elements for "
                       "DW_OP_swap.  Need 2, have %zu."),
                     stack.size ());

            dwarf_stack_value &t1 = stack[stack.size () - 1];
            dwarf_stack_value &t2 = stack[stack.size () - 2];
            std::swap (t1, t2);
            goto no_push;
          }

        case DW_OP_over:
          result_val = fetch (1);
          in_stack_memory = fetch_in_stack_memory (1);
          break;

        case DW_OP_rot:
          {
            if (stack.size () < 3)
              error (_("Not enough elements for "
                       "DW_OP_rot.  Need 3, have %zu."),
                     stack.size ());

            dwarf_stack_value temp = stack[stack.size () - 1];
            stack[stack.size () - 1] = stack[stack.size () - 2];
            stack[stack.size () - 2] = stack[stack.size () - 3];
            stack[stack.size () - 3] = temp;
            goto no_push;
          }

        case DW_OP_deref:
        case DW_OP_deref_size:
        case DW_OP_deref_type:
        case DW_OP_GNU_deref_type:
          {
            int addr_size = (op == DW_OP_deref ? this->addr_size : *op_ptr++);
            gdb_byte *buf = (gdb_byte *) alloca (addr_size);
            CORE_ADDR addr = fetch_address (0);
            struct type *type;

            pop ();

            if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
              {
                op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
                cu_offset type_die_cu_off = (cu_offset) uoffset;
                type = get_base_type (type_die_cu_off, 0);
              }
            else
              type = address_type;

            this->read_mem (buf, addr, addr_size);

            /* If the size of the object read from memory is different
               from the type length, we need to zero-extend it.  */
            if (TYPE_LENGTH (type) != addr_size)
              {
                ULONGEST datum =
                  extract_unsigned_integer (buf, addr_size, byte_order);

                buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
                store_unsigned_integer (buf, TYPE_LENGTH (type),
                                        byte_order, datum);
              }

            result_val = value_from_contents_and_address (type, buf, addr);
            break;
          }
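
          /* For example, "DW_OP_addr <A> DW_OP_deref" pushes the
             addr_size-byte value stored at address A, while
             "DW_OP_addr <A> DW_OP_deref_size 1" reads a single byte at A
             and zero-extends it to the untyped address type.  */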

        case DW_OP_abs:
        case DW_OP_neg:
        case DW_OP_not:
        case DW_OP_plus_uconst:
          {
            /* Unary operations.  */
            result_val = fetch (0);
            pop ();

            switch (op)
              {
              case DW_OP_abs:
                if (value_less (result_val,
                                value_zero (value_type (result_val), not_lval)))
                  result_val = value_neg (result_val);
                break;
              case DW_OP_neg:
                result_val = value_neg (result_val);
                break;
              case DW_OP_not:
                dwarf_require_integral (value_type (result_val));
                result_val = value_complement (result_val);
                break;
              case DW_OP_plus_uconst:
                dwarf_require_integral (value_type (result_val));
                result = value_as_long (result_val);
                op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
                result += reg;
                result_val = value_from_ulongest (address_type, result);
                break;
              }
          }
          break;

        case DW_OP_and:
        case DW_OP_div:
        case DW_OP_minus:
        case DW_OP_mod:
        case DW_OP_mul:
        case DW_OP_or:
        case DW_OP_plus:
        case DW_OP_shl:
        case DW_OP_shr:
        case DW_OP_shra:
        case DW_OP_xor:
        case DW_OP_le:
        case DW_OP_ge:
        case DW_OP_eq:
        case DW_OP_lt:
        case DW_OP_gt:
        case DW_OP_ne:
          {
            /* Binary operations.  */
            struct value *first, *second;

            second = fetch (0);
            pop ();

            first = fetch (0);
            pop ();

            if (! base_types_equal_p (value_type (first), value_type (second)))
              error (_("Incompatible types on DWARF stack"));

            switch (op)
              {
              case DW_OP_and:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_AND);
                break;
              case DW_OP_div:
                result_val = value_binop (first, second, BINOP_DIV);
                break;
              case DW_OP_minus:
                result_val = value_binop (first, second, BINOP_SUB);
                break;
              case DW_OP_mod:
                {
                  int cast_back = 0;
                  struct type *orig_type = value_type (first);

                  /* We have to special-case "old-style" untyped values
                     -- these must have mod computed using unsigned
                     math.  */
                  if (orig_type == address_type)
                    {
                      struct type *utype
                        = get_unsigned_type (this->gdbarch, orig_type);

                      cast_back = 1;
                      first = value_cast (utype, first);
                      second = value_cast (utype, second);
                    }
                  /* Note that value_binop doesn't handle float or
                     decimal float here.  This seems unimportant.  */
                  result_val = value_binop (first, second, BINOP_MOD);
                  if (cast_back)
                    result_val = value_cast (orig_type, result_val);
                }
                break;
              case DW_OP_mul:
                result_val = value_binop (first, second, BINOP_MUL);
                break;
              case DW_OP_or:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_IOR);
                break;
              case DW_OP_plus:
                result_val = value_binop (first, second, BINOP_ADD);
                break;
              case DW_OP_shl:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_LSH);
                break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *utype
                      = get_unsigned_type (this->gdbarch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (TYPE_UNSIGNED (value_type (first)))
                  {
                    struct type *stype
                      = get_signed_type (this->gdbarch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;
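
          /* The comparison operators push 1 or 0 of the untyped address
             type; for example "DW_OP_lit2 DW_OP_lit3 DW_OP_lt" leaves 1 on
             the stack, and "DW_OP_lit2 DW_OP_lit3 DW_OP_ge" leaves 0.  */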

        case DW_OP_call_frame_cfa:
          result = this->get_frame_cfa ();
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = true;
          break;

        case DW_OP_GNU_push_tls_address:
        case DW_OP_form_tls_address:
          /* Variable is at a constant offset in the thread-local
             storage block into the objfile for the current thread and
             the dynamic linker module containing this expression.  Here
             we return the offset from that base.  The top of the
             stack has the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of stack would be
             returned.  */
          result = value_as_long (fetch (0));
          pop ();
          result = this->get_tls_address (result);
          result_val = value_from_ulongest (address_type, result);
          break;

        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;

        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = fetch (0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            pop ();
          }
          goto no_push;
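
          /* The 2-byte operand of DW_OP_skip and DW_OP_bra is a signed
             offset relative to the first byte after the operand; for
             example, a DW_OP_bra whose operand is 3 skips the next three
             bytes of the expression when the popped value is non-zero.  */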

        case DW_OP_nop:
          goto no_push;

        case DW_OP_piece:
          {
            uint64_t size;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            add_piece (8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (this->location != DWARF_VALUE_LITERAL
                && this->location != DWARF_VALUE_OPTIMIZED_OUT)
              pop ();
            this->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_bit_piece:
          {
            uint64_t size, uleb_offset;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
            add_piece (size, uleb_offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (this->location != DWARF_VALUE_LITERAL
                && this->location != DWARF_VALUE_OPTIMIZED_OUT)
              pop ();
            this->location = DWARF_VALUE_MEMORY;
          }
          goto no_push;

        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          this->initialized = 0;
          goto no_push;

        case DW_OP_call2:
          {
            cu_offset cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            this->dwarf_call (cu_off);
          }
          goto no_push;

        case DW_OP_call4:
          {
            cu_offset cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            this->dwarf_call (cu_off);
          }
          goto no_push;

        case DW_OP_GNU_variable_value:
          {
            sect_offset sect_off
              = (sect_offset) extract_unsigned_integer (op_ptr,
                                                        this->ref_addr_size,
                                                        byte_order);
            op_ptr += this->ref_addr_size;
            result_val = this->dwarf_variable_value (sect_off);
          }
          break;

        case DW_OP_entry_value:
        case DW_OP_GNU_entry_value:
          {
            uint64_t len;
            CORE_ADDR deref_size;
            union call_site_parameter_u kind_u;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_entry_value: too few bytes available."));

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
            if (kind_u.dwarf_reg != -1)
              {
                op_ptr += len;
                this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
                                                  kind_u,
                                                  -1 /* deref_size */);
                goto no_push;
              }

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
                                                               op_ptr + len,
                                                               &deref_size);
            if (kind_u.dwarf_reg != -1)
              {
                if (deref_size == -1)
                  deref_size = this->addr_size;
                op_ptr += len;
                this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
                                                  kind_u, deref_size);
                goto no_push;
              }

            error (_("DWARF-2 expression error: DW_OP_entry_value is "
                     "supported only for single DW_OP_reg* "
                     "or for DW_OP_breg*(0)+DW_OP_deref*"));
          }

        case DW_OP_GNU_parameter_ref:
          {
            union call_site_parameter_u kind_u;

            kind_u.param_cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
                                              kind_u,
                                              -1 /* deref_size */);
          }
          goto no_push;

        case DW_OP_const_type:
        case DW_OP_GNU_const_type:
          {
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = get_base_type (type_die_cu_off, n);
            result_val = value_from_contents (type, data);
          }
          break;

        case DW_OP_regval_type:
        case DW_OP_GNU_regval_type:
          {
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            type = get_base_type (type_die_cu_off, 0);
            result_val = this->get_reg_value (type, reg);
          }
          break;

        case DW_OP_convert:
        case DW_OP_GNU_convert:
        case DW_OP_reinterpret:
        case DW_OP_GNU_reinterpret:
          {
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            if (to_underlying (type_die_cu_off) == 0)
              type = address_type;
            else
              type = get_base_type (type_die_cu_off, 0);

            result_val = fetch (0);
            pop ();

            if (op == DW_OP_convert || op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val));
          }
          break;

        case DW_OP_push_object_address:
          /* Return the address of the object we are currently observing.  */
          result = this->get_object_address ();
          result_val = value_from_ulongest (address_type, result);
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }

      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      push (result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (this->location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (8 * this->addr_size, 0);

  this->recursion_depth--;
  gdb_assert (this->recursion_depth >= 0);
}

/* Register the per-gdbarch data used for the lazily created DWARF
   address types.  */

void
_initialize_dwarf2expr (void)
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}