/* DWARF 2 Expression Evaluator.

   Copyright (C) 2001-2023 Free Software Foundation, Inc.

   Contributed by Daniel Berlin (dan@dberlin.org)

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "dwarf2/expr.h"
#include "dwarf2/loc.h"
#include "dwarf2/read.h"
#include "gdbsupport/underlying.h"
/* This holds gdbarch-specific types used by the DWARF expression
   evaluator.  See comments in execute_stack_op.  */

struct dwarf_gdbarch_types
{
  struct type *dw_types[3] {};
};

/* Cookie for gdbarch data.  */

static const registry<gdbarch>::key<dwarf_gdbarch_types> dwarf_arch_cookie;
/* Ensure that a FRAME is defined, throw an exception otherwise.  */

static void
ensure_have_frame (frame_info_ptr frame, const char *op_name)
{
  if (frame == nullptr)
    throw_error (GENERIC_ERROR,
		 _("%s evaluation requires a frame."), op_name);
}
/* Ensure that a PER_CU is defined and throw an exception otherwise.  */

static void
ensure_have_per_cu (dwarf2_per_cu_data *per_cu, const char *op_name)
{
  if (per_cu == nullptr)
    throw_error (GENERIC_ERROR,
		 _("%s evaluation requires a compilation unit."), op_name);
}
/* Return the number of bytes overlapping a contiguous chunk of N_BITS
   bits whose first bit is located at bit offset START.  */

static size_t
bits_to_bytes (ULONGEST start, ULONGEST n_bits)
{
  return (start % HOST_CHAR_BIT + n_bits + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT;
}
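/* For example, with 8-bit host bytes a chunk of 10 bits starting at bit
   offset 3 occupies bits 3..12, so it overlaps
   (3 % 8 + 10 + 8 - 1) / 8 = 2 bytes.  (Illustrative arithmetic only,
   not tied to any particular caller.)  */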
CORE_ADDR
read_addr_from_reg (frame_info_ptr frame, int reg)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  int regnum = dwarf_reg_to_regnum_or_error (gdbarch, reg);

  return address_from_register (regnum, frame);
}
struct piece_closure
{
  /* Reference count.  */
  int refc = 0;

  /* The objfile from which this closure's expression came.  */
  dwarf2_per_objfile *per_objfile = nullptr;

  /* The CU from which this closure's expression came.  */
  dwarf2_per_cu_data *per_cu = nullptr;

  /* The pieces describing this variable.  */
  std::vector<dwarf_expr_piece> pieces;

  /* Frame ID of frame to which a register value is relative, used
     only by DWARF_VALUE_REGISTER.  */
  struct frame_id frame_id;
};
/* Allocate a closure for a value formed from separately-described
   PIECES.  */

static piece_closure *
allocate_piece_closure (dwarf2_per_cu_data *per_cu,
			dwarf2_per_objfile *per_objfile,
			std::vector<dwarf_expr_piece> &&pieces,
			frame_info_ptr frame)
{
  piece_closure *c = new piece_closure;

  c->refc = 1;
  /* We must capture this here due to sharing of DWARF state.  */
  c->per_objfile = per_objfile;
  c->per_cu = per_cu;
  c->pieces = std::move (pieces);
  if (frame == nullptr)
    c->frame_id = null_frame_id;
  else
    c->frame_id = get_frame_id (frame);

  for (dwarf_expr_piece &piece : c->pieces)
    if (piece.location == DWARF_VALUE_STACK)
      piece.v.value->incref ();

  return c;
}
/* Read or write a pieced value V.  If FROM != NULL, operate in "write
   mode": copy FROM into the pieces comprising V.  If FROM == NULL,
   operate in "read mode": fetch the contents of the (lazy) value V by
   composing it from its pieces.  If CHECK_OPTIMIZED is true, then no
   reading or writing is done; instead the return value of this
   function is true if any piece is optimized out.  When
   CHECK_OPTIMIZED is true, FROM must be nullptr.  */
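/* As an illustration (not a case taken from this file), a location such as

     DW_OP_reg3 DW_OP_piece 4  DW_OP_addr 0x1000 DW_OP_piece 4

   describes an 8-byte object whose first four bytes live in DWARF
   register 3 and whose remaining four bytes live at address 0x1000;
   reading composes the value from both pieces, and writing scatters the
   bytes back the same way.  */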
145 rw_pieced_value (value
*v
, value
*from
, bool check_optimized
)
148 LONGEST offset
= 0, max_offset
;
149 gdb_byte
*v_contents
;
150 const gdb_byte
*from_contents
;
152 = (piece_closure
*) v
->computed_closure ();
153 gdb::byte_vector buffer
;
154 bool bits_big_endian
= type_byte_order (v
->type ()) == BFD_ENDIAN_BIG
;
156 gdb_assert (!check_optimized
|| from
== nullptr);
159 from_contents
= from
->contents ().data ();
160 v_contents
= nullptr;
165 v_contents
= nullptr;
167 v_contents
= v
->contents_raw ().data ();
168 from_contents
= nullptr;
171 ULONGEST bits_to_skip
= 8 * v
->offset ();
174 bits_to_skip
+= (8 * v
->parent ()->offset ()
177 && (type_byte_order (from
->type ())
      /* Use the least significant bits of FROM.  */
181 max_offset
= 8 * from
->type ()->length ();
182 offset
= max_offset
- v
->bitsize ();
185 max_offset
= v
->bitsize ();
188 max_offset
= 8 * v
->type ()->length ();
  /* Advance to the first non-skipped piece.  */
191 for (i
= 0; i
< c
->pieces
.size () && bits_to_skip
>= c
->pieces
[i
].size
; i
++)
192 bits_to_skip
-= c
->pieces
[i
].size
;
194 for (; i
< c
->pieces
.size () && offset
< max_offset
; i
++)
196 dwarf_expr_piece
*p
= &c
->pieces
[i
];
197 size_t this_size_bits
, this_size
;
199 this_size_bits
= p
->size
- bits_to_skip
;
200 if (this_size_bits
> max_offset
- offset
)
201 this_size_bits
= max_offset
- offset
;
205 case DWARF_VALUE_REGISTER
:
207 frame_info_ptr frame
= frame_find_by_id (c
->frame_id
);
208 gdbarch
*arch
= get_frame_arch (frame
);
209 int gdb_regnum
= dwarf_reg_to_regnum_or_error (arch
, p
->v
.regno
);
210 ULONGEST reg_bits
= 8 * register_size (arch
, gdb_regnum
);
213 if (gdbarch_byte_order (arch
) == BFD_ENDIAN_BIG
214 && p
->offset
+ p
->size
< reg_bits
)
	      /* Big-endian, and we want less than full size.  */
217 bits_to_skip
+= reg_bits
- (p
->offset
+ p
->size
);
220 bits_to_skip
+= p
->offset
;
222 this_size
= bits_to_bytes (bits_to_skip
, this_size_bits
);
223 buffer
.resize (this_size
);
228 if (!get_frame_register_bytes (frame
, gdb_regnum
,
230 buffer
, &optim
, &unavail
))
236 v
->mark_bits_optimized_out (offset
,
239 if (unavail
&& !check_optimized
)
240 v
->mark_bits_unavailable (offset
,
245 if (!check_optimized
)
246 copy_bitwise (v_contents
, offset
,
247 buffer
.data (), bits_to_skip
% 8,
248 this_size_bits
, bits_big_endian
);
253 if (bits_to_skip
% 8 != 0 || this_size_bits
% 8 != 0)
		  /* Data is copied non-byte-aligned into the register.
		     Need some bits from original register value.  */
257 get_frame_register_bytes (frame
, gdb_regnum
,
259 buffer
, &optim
, &unavail
);
		  if (optim)
		    throw_error (OPTIMIZED_OUT_ERROR,
				 _("Can't do read-modify-write to "
				   "update bitfield; containing word "
				   "has been optimized out"));
		  if (unavail)
		    throw_error (NOT_AVAILABLE_ERROR,
				 _("Can't do read-modify-write to "
				   "update bitfield; containing word "
				   "is unavailable"));
272 copy_bitwise (buffer
.data (), bits_to_skip
% 8,
273 from_contents
, offset
,
274 this_size_bits
, bits_big_endian
);
275 put_frame_register_bytes (frame
, gdb_regnum
,
282 case DWARF_VALUE_MEMORY
:
287 bits_to_skip
+= p
->offset
;
289 CORE_ADDR start_addr
= p
->v
.mem
.addr
+ bits_to_skip
/ 8;
291 if (bits_to_skip
% 8 == 0 && this_size_bits
% 8 == 0
	      /* Everything is byte-aligned; no buffer needed.  */
296 write_memory_with_notification (start_addr
,
301 read_value_memory (v
, offset
,
302 p
->v
.mem
.in_stack_memory
,
303 p
->v
.mem
.addr
+ bits_to_skip
/ 8,
304 v_contents
+ offset
/ 8,
309 this_size
= bits_to_bytes (bits_to_skip
, this_size_bits
);
310 buffer
.resize (this_size
);
315 read_value_memory (v
, offset
,
316 p
->v
.mem
.in_stack_memory
,
317 p
->v
.mem
.addr
+ bits_to_skip
/ 8,
318 buffer
.data (), this_size
);
319 copy_bitwise (v_contents
, offset
,
320 buffer
.data (), bits_to_skip
% 8,
321 this_size_bits
, bits_big_endian
);
326 if (bits_to_skip
% 8 != 0 || this_size_bits
% 8 != 0)
		  /* Perform a single read for small sizes.  */
331 read_memory (start_addr
, buffer
.data (),
		  /* Only the first and last bytes can possibly have
		     any bits reused.  */
338 read_memory (start_addr
, buffer
.data (), 1);
339 read_memory (start_addr
+ this_size
- 1,
340 &buffer
[this_size
- 1], 1);
344 copy_bitwise (buffer
.data (), bits_to_skip
% 8,
345 from_contents
, offset
,
346 this_size_bits
, bits_big_endian
);
347 write_memory_with_notification (start_addr
,
354 case DWARF_VALUE_STACK
:
361 v
->mark_bits_optimized_out (offset
, this_size_bits
);
365 gdbarch
*objfile_gdbarch
= c
->per_objfile
->objfile
->arch ();
366 ULONGEST stack_value_size_bits
367 = 8 * p
->v
.value
->type ()->length ();
	    /* Use zeroes if piece reaches beyond stack value.  */
370 if (p
->offset
+ p
->size
> stack_value_size_bits
)
	    /* Piece is anchored at least significant bit end.  */
374 if (gdbarch_byte_order (objfile_gdbarch
) == BFD_ENDIAN_BIG
)
375 bits_to_skip
+= stack_value_size_bits
- p
->offset
- p
->size
;
377 bits_to_skip
+= p
->offset
;
379 copy_bitwise (v_contents
, offset
,
380 p
->v
.value
->contents_all ().data (),
382 this_size_bits
, bits_big_endian
);
386 case DWARF_VALUE_LITERAL
:
393 v
->mark_bits_optimized_out (offset
, this_size_bits
);
397 ULONGEST literal_size_bits
= 8 * p
->v
.literal
.length
;
398 size_t n
= this_size_bits
;
	    /* Cut off at the end of the implicit value.  */
401 bits_to_skip
+= p
->offset
;
402 if (bits_to_skip
>= literal_size_bits
)
404 if (n
> literal_size_bits
- bits_to_skip
)
405 n
= literal_size_bits
- bits_to_skip
;
407 copy_bitwise (v_contents
, offset
,
408 p
->v
.literal
.data
, bits_to_skip
,
413 case DWARF_VALUE_IMPLICIT_POINTER
:
416 v
->mark_bits_optimized_out (offset
, this_size_bits
);
	  /* These bits show up as zeros -- but do not cause the value to
	     be considered optimized-out.  */
424 case DWARF_VALUE_OPTIMIZED_OUT
:
427 v
->mark_bits_optimized_out (offset
, this_size_bits
);
431 internal_error (_("invalid location type"));
434 offset
+= this_size_bits
;
438 if (offset
< max_offset
)
442 v
->mark_bits_optimized_out (offset
, max_offset
- offset
);
static void
read_pieced_value (value *v)
{
  rw_pieced_value (v, nullptr, false);
}

static void
write_pieced_value (value *to, value *from)
{
  rw_pieced_value (to, from, false);
}

static bool
is_optimized_out_pieced_value (value *v)
{
  return rw_pieced_value (v, nullptr, true);
}
/* An implementation of an lval_funcs method to see whether a value is
   a synthetic pointer.  */
470 check_pieced_synthetic_pointer (const value
*value
, LONGEST bit_offset
,
473 piece_closure
*c
= (piece_closure
*) value
->computed_closure ();
476 bit_offset
+= 8 * value
->offset ();
477 if (value
->bitsize ())
478 bit_offset
+= value
->bitpos ();
480 for (i
= 0; i
< c
->pieces
.size () && bit_length
> 0; i
++)
482 dwarf_expr_piece
*p
= &c
->pieces
[i
];
483 size_t this_size_bits
= p
->size
;
487 if (bit_offset
>= this_size_bits
)
489 bit_offset
-= this_size_bits
;
493 bit_length
-= this_size_bits
- bit_offset
;
497 bit_length
-= this_size_bits
;
499 if (p
->location
!= DWARF_VALUE_IMPLICIT_POINTER
)
503 return bit_length
== 0;
/* An implementation of an lval_funcs method to indirect through a
   pointer.  This handles the synthetic pointer case when needed.  */
510 indirect_pieced_value (value
*value
)
513 = (piece_closure
*) value
->computed_closure ();
515 dwarf_expr_piece
*piece
= NULL
;
517 struct type
*type
= check_typedef (value
->type ());
518 if (type
->code () != TYPE_CODE_PTR
)
521 int bit_length
= 8 * type
->length ();
522 LONGEST bit_offset
= 8 * value
->offset ();
523 if (value
->bitsize ())
524 bit_offset
+= value
->bitpos ();
526 for (i
= 0; i
< c
->pieces
.size () && bit_length
> 0; i
++)
528 dwarf_expr_piece
*p
= &c
->pieces
[i
];
529 size_t this_size_bits
= p
->size
;
533 if (bit_offset
>= this_size_bits
)
535 bit_offset
-= this_size_bits
;
539 bit_length
-= this_size_bits
- bit_offset
;
543 bit_length
-= this_size_bits
;
545 if (p
->location
!= DWARF_VALUE_IMPLICIT_POINTER
)
549 error (_("Invalid use of DW_OP_implicit_pointer"));
555 gdb_assert (piece
!= NULL
&& c
->per_cu
!= nullptr);
556 frame_info_ptr frame
= get_selected_frame (_("No frame selected."));
  /* This is an offset requested by GDB, such as value subscripts.
     However, due to how synthetic pointers are implemented, this is
     always presented to us as a pointer type.  This means we have to
     sign-extend it manually as appropriate.  Use raw
     extract_signed_integer directly rather than value_as_address and
     sign extend afterwards on architectures that would need it
     (mostly everywhere except MIPS, which has signed addresses) as
     the latter would go through gdbarch_pointer_to_address and thus
     return a CORE_ADDR with high bits set on architectures that
     encode address spaces and other things in CORE_ADDR.  */
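  /* For instance (illustrative values), on a 32-bit target a synthetic
     pointer whose bytes are 0xfffffffc must be read as the offset -4;
     converting it through gdbarch_pointer_to_address could instead yield
     a large unsigned CORE_ADDR and index the wrong location.  */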
568 bfd_endian byte_order
= gdbarch_byte_order (get_frame_arch (frame
));
570 = extract_signed_integer (value
->contents (), byte_order
);
571 byte_offset
+= piece
->v
.ptr
.offset
;
573 return indirect_synthetic_pointer (piece
->v
.ptr
.die_sect_off
,
574 byte_offset
, c
->per_cu
,
575 c
->per_objfile
, frame
, type
);
/* Implementation of the coerce_ref method of lval_funcs for synthetic C++
   references.  */
582 coerce_pieced_ref (const value
*value
)
584 struct type
*type
= check_typedef (value
->type ());
586 if (value
->bits_synthetic_pointer (value
->embedded_offset (),
587 TARGET_CHAR_BIT
* type
->length ()))
589 const piece_closure
*closure
590 = (piece_closure
*) value
->computed_closure ();
592 = get_selected_frame (_("No frame selected."));
      /* gdb represents synthetic pointers as pieced values with a single
	 piece.  */
596 gdb_assert (closure
!= NULL
);
597 gdb_assert (closure
->pieces
.size () == 1);
599 return indirect_synthetic_pointer
600 (closure
->pieces
[0].v
.ptr
.die_sect_off
,
601 closure
->pieces
[0].v
.ptr
.offset
,
602 closure
->per_cu
, closure
->per_objfile
, frame
, type
);
  /* Else: not a synthetic reference; do nothing.  */
612 copy_pieced_value_closure (const value
*v
)
614 piece_closure
*c
= (piece_closure
*) v
->computed_closure ();
621 free_pieced_value_closure (value
*v
)
623 piece_closure
*c
= (piece_closure
*) v
->computed_closure ();
628 for (dwarf_expr_piece
&p
: c
->pieces
)
629 if (p
.location
== DWARF_VALUE_STACK
)
630 p
.v
.value
->decref ();
/* Functions for accessing a variable described by DW_OP_piece.  */

static const struct lval_funcs pieced_value_funcs = {
  read_pieced_value,
  write_pieced_value,
  is_optimized_out_pieced_value,
  indirect_pieced_value,
  coerce_pieced_ref,
  check_pieced_synthetic_pointer,
  copy_pieced_value_closure,
  free_pieced_value_closure
};
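/* Illustrative wiring (a sketch that mirrors fetch_result below; the
   local variable names are made up): a pieced result is materialized as
   a computed value whose callbacks are the functions above:

     piece_closure *c = allocate_piece_closure (per_cu, per_objfile,
						 std::move (pieces), frame);
     value *v = value::allocate_computed (subobj_type,
					  &pieced_value_funcs, c);  */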
/* Given context CTX, section offset SECT_OFF, and compilation unit
   data PER_CU, execute the "variable value" operation on the DIE
   found at SECT_OFF.  */

static value *
sect_variable_value (sect_offset sect_off,
		     dwarf2_per_cu_data *per_cu,
		     dwarf2_per_objfile *per_objfile)
{
  const char *var_name = nullptr;
  struct type *die_type
    = dwarf2_fetch_die_type_sect_off (sect_off, per_cu, per_objfile,
				      &var_name);

  if (die_type == NULL)
    error (_("Bad DW_OP_GNU_variable_value DIE."));

  /* Note: Things still work when the following test is removed.  This
     test and error is here to conform to the proposed specification.  */
  if (die_type->code () != TYPE_CODE_INT
      && die_type->code () != TYPE_CODE_ENUM
      && die_type->code () != TYPE_CODE_RANGE
      && die_type->code () != TYPE_CODE_PTR)
    error (_("Type of DW_OP_GNU_variable_value DIE must be an integer or pointer."));

  if (var_name != nullptr)
    {
      value *result = compute_var_value (var_name);
      if (result != nullptr)
	return result;
    }

  struct type *type = lookup_pointer_type (die_type);
  frame_info_ptr frame = get_selected_frame (_("No frame selected."));
  return indirect_synthetic_pointer (sect_off, 0, per_cu, per_objfile, frame,
				     type, true);
}
/* Return the type used for DWARF operations where the type is
   unspecified in the DWARF spec.  Only certain sizes are
   supported.  */

struct type *
dwarf_expr_context::address_type () const
{
  gdbarch *arch = this->m_per_objfile->objfile->arch ();
  dwarf_gdbarch_types *types = dwarf_arch_cookie.get (arch);
  if (types == nullptr)
    types = dwarf_arch_cookie.emplace (arch);
  int ndx;

  if (this->m_addr_size == 2)
    ndx = 0;
  else if (this->m_addr_size == 4)
    ndx = 1;
  else if (this->m_addr_size == 8)
    ndx = 2;
  else
    error (_("Unsupported address size in DWARF expressions: %d bits"),
	   8 * this->m_addr_size);

  if (types->dw_types[ndx] == NULL)
    {
      type_allocator alloc (arch);
      types->dw_types[ndx]
	= init_integer_type (alloc, 8 * this->m_addr_size,
			     0, "<signed DWARF address type>");
    }

  return types->dw_types[ndx];
}
/* Create a new context for the expression evaluator.  */

dwarf_expr_context::dwarf_expr_context (dwarf2_per_objfile *per_objfile,
					int addr_size)
  : m_addr_size (addr_size),
    m_per_objfile (per_objfile)
{
  gdb_assert (per_objfile != nullptr);
}

/* Push VALUE onto the stack.  */

void
dwarf_expr_context::push (struct value *value, bool in_stack_memory)
{
  this->m_stack.emplace_back (value, in_stack_memory);
}

/* Push VALUE onto the stack.  */

void
dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
{
  push (value_from_ulongest (address_type (), value), in_stack_memory);
}

/* Pop the top item off of the stack.  */

void
dwarf_expr_context::pop ()
{
  if (this->m_stack.empty ())
    error (_("dwarf expression stack underflow"));

  this->m_stack.pop_back ();
}

/* Retrieve the N'th item on the stack.  */

struct value *
dwarf_expr_context::fetch (int n)
{
  if (this->m_stack.size () <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %zu elements on it."),
	   n, this->m_stack.size ());
  return this->m_stack[this->m_stack.size () - (1 + n)].value;
}
771 dwarf_expr_context::get_frame_base (const gdb_byte
**start
,
774 ensure_have_frame (this->m_frame
, "DW_OP_fbreg");
776 const block
*bl
= get_frame_block (this->m_frame
, NULL
);
779 error (_("frame address is not available."));
  /* Use block_linkage_function, which returns a real (not inlined)
     function, instead of get_frame_function, which may return an
     inlined function.  */
  symbol *framefunc = bl->linkage_function ();
  /* If we found a frame-relative symbol then it was certainly within
     some function associated with a frame.  If we can't find the frame,
     something has gone wrong.  */
789 gdb_assert (framefunc
!= NULL
);
791 func_get_frame_base_dwarf_block (framefunc
,
792 get_frame_address_in_block (this->m_frame
),
799 dwarf_expr_context::get_base_type (cu_offset die_cu_off
)
801 if (this->m_per_cu
== nullptr)
802 return builtin_type (this->m_per_objfile
->objfile
->arch ())->builtin_int
;
804 struct type
*result
= dwarf2_get_die_type (die_cu_off
, this->m_per_cu
,
805 this->m_per_objfile
);
807 if (result
== nullptr)
808 error (_("Could not find type for operation"));
816 dwarf_expr_context::dwarf_call (cu_offset die_cu_off
)
818 ensure_have_per_cu (this->m_per_cu
, "DW_OP_call");
820 frame_info_ptr frame
= this->m_frame
;
822 auto get_pc_from_frame
= [frame
] ()
824 ensure_have_frame (frame
, "DW_OP_call");
825 return get_frame_address_in_block (frame
);
828 dwarf2_locexpr_baton block
829 = dwarf2_fetch_die_loc_cu_off (die_cu_off
, this->m_per_cu
,
830 this->m_per_objfile
, get_pc_from_frame
);
  /* DW_OP_call_ref is currently not supported.  */
833 gdb_assert (block
.per_cu
== this->m_per_cu
);
835 this->eval (block
.data
, block
.size
);
841 dwarf_expr_context::read_mem (gdb_byte
*buf
, CORE_ADDR addr
,
  /* Prefer the passed-in memory, if it exists.  */
848 if (this->m_addr_info
!= nullptr)
850 CORE_ADDR offset
= addr
- this->m_addr_info
->addr
;
852 if (offset
< this->m_addr_info
->valaddr
.size ()
853 && offset
+ length
<= this->m_addr_info
->valaddr
.size ())
855 memcpy (buf
, this->m_addr_info
->valaddr
.data (), length
);
860 read_memory (addr
, buf
, length
);
866 dwarf_expr_context::push_dwarf_reg_entry_value (call_site_parameter_kind kind
,
867 call_site_parameter_u kind_u
,
870 ensure_have_per_cu (this->m_per_cu
, "DW_OP_entry_value");
871 ensure_have_frame (this->m_frame
, "DW_OP_entry_value");
873 dwarf2_per_cu_data
*caller_per_cu
;
874 dwarf2_per_objfile
*caller_per_objfile
;
875 frame_info_ptr caller_frame
= get_prev_frame (this->m_frame
);
876 call_site_parameter
*parameter
877 = dwarf_expr_reg_to_entry_parameter (this->m_frame
, kind
, kind_u
,
879 &caller_per_objfile
);
880 const gdb_byte
*data_src
881 = deref_size
== -1 ? parameter
->value
: parameter
->data_value
;
883 = deref_size
== -1 ? parameter
->value_size
: parameter
->data_value_size
;
  /* DEREF_SIZE size is not verified here.  */
886 if (data_src
== nullptr)
887 throw_error (NO_ENTRY_VALUE_ERROR
,
888 _("Cannot resolve DW_AT_call_data_value"));
  /* We are about to evaluate an expression in the context of the caller
     of the current frame.  This evaluation context may be different from
     the current (callee's) context, so temporarily set the caller's context.

     It is possible for the caller to be from a different objfile from the
     callee if the call is made through a function pointer.  */
896 scoped_restore save_frame
= make_scoped_restore (&this->m_frame
,
898 scoped_restore save_per_cu
= make_scoped_restore (&this->m_per_cu
,
900 scoped_restore save_addr_info
= make_scoped_restore (&this->m_addr_info
,
902 scoped_restore save_per_objfile
= make_scoped_restore (&this->m_per_objfile
,
905 scoped_restore save_addr_size
= make_scoped_restore (&this->m_addr_size
);
906 this->m_addr_size
= this->m_per_cu
->addr_size ();
908 this->eval (data_src
, size
);
914 dwarf_expr_context::fetch_result (struct type
*type
, struct type
*subobj_type
,
915 LONGEST subobj_offset
, bool as_lval
)
917 value
*retval
= nullptr;
918 gdbarch
*arch
= this->m_per_objfile
->objfile
->arch ();
921 type
= address_type ();
923 if (subobj_type
== nullptr)
  /* Ensure that, if TYPE or SUBOBJ_TYPE are typedefs, their length is filled
     in instead of being zero.  */
928 check_typedef (type
);
929 check_typedef (subobj_type
);
931 if (this->m_pieces
.size () > 0)
933 ULONGEST bit_size
= 0;
935 for (dwarf_expr_piece
&piece
: this->m_pieces
)
936 bit_size
+= piece
.size
;
      /* Complain if the expression is larger than the size of the
	 outer type.  */
939 if (bit_size
> 8 * type
->length ())
940 invalid_synthetic_pointer ();
943 = allocate_piece_closure (this->m_per_cu
, this->m_per_objfile
,
944 std::move (this->m_pieces
), this->m_frame
);
945 retval
= value::allocate_computed (subobj_type
,
946 &pieced_value_funcs
, c
);
947 retval
->set_offset (subobj_offset
);
      /* If AS_LVAL is false, it means that an implicit conversion
	 from the location description to a value is expected.  */
954 this->m_location
= DWARF_VALUE_STACK
;
956 switch (this->m_location
)
958 case DWARF_VALUE_REGISTER
:
960 gdbarch
*f_arch
= get_frame_arch (this->m_frame
);
962 = longest_to_int (value_as_long (this->fetch (0)));
963 int gdb_regnum
= dwarf_reg_to_regnum_or_error (f_arch
,
966 if (subobj_offset
!= 0)
967 error (_("cannot use offset on synthetic pointer to register"));
969 gdb_assert (this->m_frame
!= NULL
);
971 retval
= value_from_register (subobj_type
, gdb_regnum
,
973 if (retval
->optimized_out ())
	      /* This means the register has undefined value / was
		 not saved.  As we're computing the location of some
		 variable etc. in the program, not a value for
		 inspecting a register ($pc, $sp, etc.), return a
		 generic optimized out value instead, so that we show
		 <optimized out> instead of <not saved>.  */
981 value
*tmp
= value::allocate (subobj_type
);
982 retval
->contents_copy (tmp
, 0, 0,
983 subobj_type
->length ());
989 case DWARF_VALUE_MEMORY
:
991 struct type
*ptr_type
;
992 CORE_ADDR address
= this->fetch_address (0);
993 bool in_stack_memory
= this->fetch_in_stack_memory (0);
	    /* DW_OP_deref_size (and possibly other operations too) may
	       create a pointer instead of an address.  Ideally, the
	       pointer to address conversion would be performed as part
	       of those operations, but the type of the object to
	       which the address refers is not known at the time of
	       the operation.  Therefore, we do the conversion here
	       since the type is readily available.  */
1003 switch (subobj_type
->code ())
1005 case TYPE_CODE_FUNC
:
1006 case TYPE_CODE_METHOD
:
1007 ptr_type
= builtin_type (arch
)->builtin_func_ptr
;
1010 ptr_type
= builtin_type (arch
)->builtin_data_ptr
;
1013 address
= value_as_address (value_from_pointer (ptr_type
, address
));
1015 retval
= value_at_lazy (subobj_type
, address
+ subobj_offset
,
1017 if (in_stack_memory
)
1018 retval
->set_stack (true);
1022 case DWARF_VALUE_STACK
:
1024 value
*val
= this->fetch (0);
1025 size_t n
= val
->type ()->length ();
1026 size_t len
= subobj_type
->length ();
1027 size_t max
= type
->length ();
1029 if (subobj_offset
+ len
> max
)
1030 invalid_synthetic_pointer ();
1032 retval
= value::allocate (subobj_type
);
	    /* The given offset is relative to the actual object.  */
1035 if (gdbarch_byte_order (arch
) == BFD_ENDIAN_BIG
)
1036 subobj_offset
+= n
- max
;
1038 copy (val
->contents_all ().slice (subobj_offset
, len
),
1039 retval
->contents_raw ());
1043 case DWARF_VALUE_LITERAL
:
1045 size_t n
= subobj_type
->length ();
1047 if (subobj_offset
+ n
> this->m_len
)
1048 invalid_synthetic_pointer ();
1050 retval
= value::allocate (subobj_type
);
1051 bfd_byte
*contents
= retval
->contents_raw ().data ();
1052 memcpy (contents
, this->m_data
+ subobj_offset
, n
);
1056 case DWARF_VALUE_OPTIMIZED_OUT
:
1057 retval
= value::allocate_optimized_out (subobj_type
);
	  /* DWARF_VALUE_IMPLICIT_POINTER was converted to a pieced
	     operation by execute_stack_op.  */
	case DWARF_VALUE_IMPLICIT_POINTER:
	  /* DWARF_VALUE_OPTIMIZED_OUT can't occur in this context --
	     it can only be encountered when making a piece.  */
	default:
1066 internal_error (_("invalid location type"));
1070 retval
->set_initialized (this->m_initialized
);
value *
dwarf_expr_context::evaluate (const gdb_byte *addr, size_t len, bool as_lval,
			      dwarf2_per_cu_data *per_cu, frame_info_ptr frame,
			      const struct property_addr_info *addr_info,
			      struct type *type, struct type *subobj_type,
			      LONGEST subobj_offset)
{
  this->m_per_cu = per_cu;
  this->m_frame = frame;
  this->m_addr_info = addr_info;

  eval (addr, len);
  return fetch_result (type, subobj_type, subobj_offset, as_lval);
}
/* Require that TYPE be an integral type; throw an exception if not.  */

static void
dwarf_require_integral (struct type *type)
{
  if (type->code () != TYPE_CODE_INT
      && type->code () != TYPE_CODE_CHAR
      && type->code () != TYPE_CODE_BOOL)
    error (_("integral type expected in DWARF expression"));
}
/* Return the unsigned form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (type->length ())
    {
    case 1:
      return builtin_type (gdbarch)->builtin_uint8;
    case 2:
      return builtin_type (gdbarch)->builtin_uint16;
    case 4:
      return builtin_type (gdbarch)->builtin_uint32;
    case 8:
      return builtin_type (gdbarch)->builtin_uint64;
    default:
      error (_("no unsigned variant found for type, while evaluating "
	       "DWARF expression"));
    }
}

/* Return the signed form of TYPE.  TYPE is necessarily an integral
   type.  */

static struct type *
get_signed_type (struct gdbarch *gdbarch, struct type *type)
{
  switch (type->length ())
    {
    case 1:
      return builtin_type (gdbarch)->builtin_int8;
    case 2:
      return builtin_type (gdbarch)->builtin_int16;
    case 4:
      return builtin_type (gdbarch)->builtin_int32;
    case 8:
      return builtin_type (gdbarch)->builtin_int64;
    default:
      error (_("no signed variant found for type, while evaluating "
	       "DWARF expression"));
    }
}
/* Retrieve the N'th item on the stack, converted to an address.  */
1150 dwarf_expr_context::fetch_address (int n
)
1152 gdbarch
*arch
= this->m_per_objfile
->objfile
->arch ();
1153 value
*result_val
= fetch (n
);
1154 bfd_endian byte_order
= gdbarch_byte_order (arch
);
1157 dwarf_require_integral (result_val
->type ());
1158 result
= extract_unsigned_integer (result_val
->contents (), byte_order
);
  /* For most architectures, calling extract_unsigned_integer() alone
     is sufficient for extracting an address.  However, some
     architectures (e.g. MIPS) use signed addresses and using
     extract_unsigned_integer() will not produce a correct
     result.  Make sure we invoke gdbarch_integer_to_address()
     for those architectures which require it.  */
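  /* For example (illustrative values), 64-bit MIPS sign-extends 32-bit
     addresses, so the 32-bit value 0x80001234 must become the CORE_ADDR
     0xffffffff80001234 rather than 0x0000000080001234;
     gdbarch_integer_to_address performs that conversion.  */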
1166 if (gdbarch_integer_to_address_p (arch
))
1168 gdb_byte
*buf
= (gdb_byte
*) alloca (this->m_addr_size
);
1169 type
*int_type
= get_unsigned_type (arch
,
1170 result_val
->type ());
1172 store_unsigned_integer (buf
, this->m_addr_size
, byte_order
, result
);
1173 return gdbarch_integer_to_address (arch
, int_type
, buf
);
1176 return (CORE_ADDR
) result
;
/* Retrieve the in_stack_memory flag of the N'th item on the stack.  */

bool
dwarf_expr_context::fetch_in_stack_memory (int n)
{
  if (this->m_stack.size () <= n)
    error (_("Asked for position %d of stack, "
	     "stack only has %zu elements on it."),
	   n, this->m_stack.size ());
  return this->m_stack[this->m_stack.size () - (1 + n)].in_stack_memory;
}

/* Return true if the expression stack is empty.  */

bool
dwarf_expr_context::stack_empty_p () const
{
  return m_stack.empty ();
}
/* Add a new piece to the dwarf_expr_context's piece list.  */
1201 dwarf_expr_context::add_piece (ULONGEST size
, ULONGEST offset
)
1203 this->m_pieces
.emplace_back ();
1204 dwarf_expr_piece
&p
= this->m_pieces
.back ();
1206 p
.location
= this->m_location
;
1210 if (p
.location
== DWARF_VALUE_LITERAL
)
1212 p
.v
.literal
.data
= this->m_data
;
1213 p
.v
.literal
.length
= this->m_len
;
1215 else if (stack_empty_p ())
1217 p
.location
= DWARF_VALUE_OPTIMIZED_OUT
;
      /* Also reset the context's location, for our callers.  This is
	 a somewhat strange approach, but this lets us avoid setting
	 the location to DWARF_VALUE_MEMORY in all the individual
	 cases in the evaluator.  */
1222 this->m_location
= DWARF_VALUE_OPTIMIZED_OUT
;
1224 else if (p
.location
== DWARF_VALUE_MEMORY
)
1226 p
.v
.mem
.addr
= fetch_address (0);
1227 p
.v
.mem
.in_stack_memory
= fetch_in_stack_memory (0);
1229 else if (p
.location
== DWARF_VALUE_IMPLICIT_POINTER
)
1231 p
.v
.ptr
.die_sect_off
= (sect_offset
) this->m_len
;
1232 p
.v
.ptr
.offset
= value_as_long (fetch (0));
1234 else if (p
.location
== DWARF_VALUE_REGISTER
)
1235 p
.v
.regno
= value_as_long (fetch (0));
1238 p
.v
.value
= fetch (0);
/* Evaluate the expression at ADDR (LEN bytes long).  */

void
dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
{
  int old_recursion_depth = this->m_recursion_depth;

  execute_stack_op (addr, addr + len);

  /* RECURSION_DEPTH becomes invalid if an exception was thrown here.  */

  gdb_assert (this->m_recursion_depth == old_recursion_depth);
}
/* Helper to read a uleb128 value or throw an error.  */

const gdb_byte *
safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
		   uint64_t *r)
{
  buf = gdb_read_uleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
  return buf;
}

/* Helper to read a sleb128 value or throw an error.  */

const gdb_byte *
safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
		   int64_t *r)
{
  buf = gdb_read_sleb128 (buf, buf_end, r);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
  return buf;
}

const gdb_byte *
safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
{
  buf = gdb_skip_leb128 (buf, buf_end);
  if (buf == NULL)
    error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
  return buf;
}
/* Check that the current operator is either at the end of an
   expression, or that it is followed by a composition operator or by
   DW_OP_GNU_uninit (which should terminate the expression).  */

void
dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
				const char *op_name)
{
  if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
      && *op_ptr != DW_OP_GNU_uninit)
    error (_("DWARF-2 expression error: `%s' operations must be "
	     "used either alone or in conjunction with DW_OP_piece "
	     "or DW_OP_bit_piece."),
	   op_name);
}
/* Return true iff the types T1 and T2 are "the same".  This only does
   checks that might reasonably be needed to compare DWARF base
   types.  */

static int
base_types_equal_p (struct type *t1, struct type *t2)
{
  if (t1->code () != t2->code ())
    return 0;
  if (t1->is_unsigned () != t2->is_unsigned ())
    return 0;
  return t1->length () == t2->length ();
}
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
   DWARF register number.  Otherwise return -1.  */
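/* For example (illustrative blocks): { DW_OP_reg5 } yields 5 and
   { DW_OP_regx 33 } yields 33, while { DW_OP_breg5 0 } or a block with
   trailing operators yields -1.  */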
1324 dwarf_block_to_dwarf_reg (const gdb_byte
*buf
, const gdb_byte
*buf_end
)
1330 if (*buf
>= DW_OP_reg0
&& *buf
<= DW_OP_reg31
)
1332 if (buf_end
- buf
!= 1)
1334 return *buf
- DW_OP_reg0
;
1337 if (*buf
== DW_OP_regval_type
|| *buf
== DW_OP_GNU_regval_type
)
1340 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
1343 buf
= gdb_skip_leb128 (buf
, buf_end
);
1347 else if (*buf
== DW_OP_regx
)
1350 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
1356 if (buf
!= buf_end
|| (int) dwarf_reg
!= dwarf_reg
)
/* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
   DW_OP_deref* return the DWARF register number.  Otherwise return -1.
   DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
   size from DW_OP_deref_size.  */
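/* For example (illustrative blocks): { DW_OP_breg6 0 DW_OP_deref } yields
   register 6 with *DEREF_SIZE_RETURN set to -1, and
   { DW_OP_breg6 0 DW_OP_deref_size 4 } yields 6 with *DEREF_SIZE_RETURN
   set to 4; a block using a nonzero register offset yields -1.  */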
1367 dwarf_block_to_dwarf_reg_deref (const gdb_byte
*buf
, const gdb_byte
*buf_end
,
1368 CORE_ADDR
*deref_size_return
)
1376 if (*buf
>= DW_OP_breg0
&& *buf
<= DW_OP_breg31
)
1378 dwarf_reg
= *buf
- DW_OP_breg0
;
1383 else if (*buf
== DW_OP_bregx
)
1386 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
1389 if ((int) dwarf_reg
!= dwarf_reg
)
1395 buf
= gdb_read_sleb128 (buf
, buf_end
, &offset
);
1401 if (*buf
== DW_OP_deref
)
1404 *deref_size_return
= -1;
1406 else if (*buf
== DW_OP_deref_size
)
1411 *deref_size_return
= *buf
++;
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
   in FB_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.  */
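/* For example, the single-operator block { DW_OP_fbreg -16 } (illustrative)
   stores -16 in *FB_OFFSET_RETURN and returns 1; any other leading operator,
   or trailing operators after it, makes the function return 0.  */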
1426 dwarf_block_to_fb_offset (const gdb_byte
*buf
, const gdb_byte
*buf_end
,
1427 CORE_ADDR
*fb_offset_return
)
1434 if (*buf
!= DW_OP_fbreg
)
1438 buf
= gdb_read_sleb128 (buf
, buf_end
, &fb_offset
);
1441 *fb_offset_return
= fb_offset
;
1442 if (buf
!= buf_end
|| fb_offset
!= (LONGEST
) *fb_offset_return
)
/* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
   in SP_OFFSET_RETURN with the X offset and return 1.  Otherwise return 0.
   The matched SP register number depends on GDBARCH.  */
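/* For example (illustrative), on a target whose stack pointer is DWARF
   register 7, the block { DW_OP_breg7 8 } stores 8 in *SP_OFFSET_RETURN
   and returns 1; a DW_OP_breg* naming any other register returns 0.  */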
1453 dwarf_block_to_sp_offset (struct gdbarch
*gdbarch
, const gdb_byte
*buf
,
1454 const gdb_byte
*buf_end
, CORE_ADDR
*sp_offset_return
)
1461 if (*buf
>= DW_OP_breg0
&& *buf
<= DW_OP_breg31
)
1463 dwarf_reg
= *buf
- DW_OP_breg0
;
1468 if (*buf
!= DW_OP_bregx
)
1471 buf
= gdb_read_uleb128 (buf
, buf_end
, &dwarf_reg
);
1476 if (dwarf_reg_to_regnum (gdbarch
, dwarf_reg
)
1477 != gdbarch_sp_regnum (gdbarch
))
1480 buf
= gdb_read_sleb128 (buf
, buf_end
, &sp_offset
);
1483 *sp_offset_return
= sp_offset
;
1484 if (buf
!= buf_end
|| sp_offset
!= (LONGEST
) *sp_offset_return
)
/* The engine for the expression evaluator.  Using the context in this
   object, evaluate the expression between OP_PTR and OP_END.  */
1494 dwarf_expr_context::execute_stack_op (const gdb_byte
*op_ptr
,
1495 const gdb_byte
*op_end
)
1497 gdbarch
*arch
= this->m_per_objfile
->objfile
->arch ();
1498 bfd_endian byte_order
= gdbarch_byte_order (arch
);
  /* Old-style "untyped" DWARF values need special treatment in a
     couple of places, specifically DW_OP_mod and DW_OP_shr.  We need
     a special type for these values so we can distinguish them from
     values that have an explicit type, because explicitly-typed
     values do not need special treatment.  This special type must be
     different (in the `==' sense) from any base type coming from the
     CU.  */
1506 type
*address_type
= this->address_type ();
1508 this->m_location
= DWARF_VALUE_MEMORY
;
1509 this->m_initialized
= true; /* Default is initialized. */
1511 if (this->m_recursion_depth
> this->m_max_recursion_depth
)
1512 error (_("DWARF-2 expression error: Loop detected (%d)."),
1513 this->m_recursion_depth
);
1514 this->m_recursion_depth
++;
1516 while (op_ptr
< op_end
)
1518 dwarf_location_atom op
= (dwarf_location_atom
) *op_ptr
++;
      /* Assume the value is not in stack memory.
	 Code that knows otherwise sets this to true.
	 Some arithmetic on stack addresses can probably be assumed to still
	 be a stack address, but we skip this complication for now.
	 This is just an optimization, so it's always ok to punt
	 and leave this as false.  */
1526 bool in_stack_memory
= false;
1527 uint64_t uoffset
, reg
;
1529 value
*result_val
= NULL
;
      /* The DWARF expression might have a bug causing an infinite
	 loop.  In that case, quitting is the only way out.  */
1569 result
= op
- DW_OP_lit0
;
1570 result_val
= value_from_ulongest (address_type
, result
);
1574 result
= extract_unsigned_integer (op_ptr
,
1575 this->m_addr_size
, byte_order
);
1576 op_ptr
+= this->m_addr_size
;
	  /* Some versions of GCC emit DW_OP_addr before
	     DW_OP_GNU_push_tls_address.  In this case the value is an
	     index, not an address.  We don't support things like
	     branching between the address and the TLS op.  */
1581 if (op_ptr
>= op_end
|| *op_ptr
!= DW_OP_GNU_push_tls_address
)
1582 result
+= this->m_per_objfile
->objfile
->text_section_offset ();
1583 result_val
= value_from_ulongest (address_type
, result
);
1587 case DW_OP_GNU_addr_index
:
1588 ensure_have_per_cu (this->m_per_cu
, "DW_OP_addrx");
1590 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1591 result
= (m_per_objfile
->relocate
1592 (dwarf2_read_addr_index (this->m_per_cu
,
1593 this->m_per_objfile
,
1595 result_val
= value_from_ulongest (address_type
, result
);
1597 case DW_OP_GNU_const_index
:
1598 ensure_have_per_cu (this->m_per_cu
, "DW_OP_GNU_const_index");
1600 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1601 result
= (ULONGEST
) dwarf2_read_addr_index (this->m_per_cu
,
1602 this->m_per_objfile
,
1604 result_val
= value_from_ulongest (address_type
, result
);
1608 result
= extract_unsigned_integer (op_ptr
, 1, byte_order
);
1609 result_val
= value_from_ulongest (address_type
, result
);
1613 result
= extract_signed_integer (op_ptr
, 1, byte_order
);
1614 result_val
= value_from_ulongest (address_type
, result
);
1618 result
= extract_unsigned_integer (op_ptr
, 2, byte_order
);
1619 result_val
= value_from_ulongest (address_type
, result
);
1623 result
= extract_signed_integer (op_ptr
, 2, byte_order
);
1624 result_val
= value_from_ulongest (address_type
, result
);
1628 result
= extract_unsigned_integer (op_ptr
, 4, byte_order
);
1629 result_val
= value_from_ulongest (address_type
, result
);
1633 result
= extract_signed_integer (op_ptr
, 4, byte_order
);
1634 result_val
= value_from_ulongest (address_type
, result
);
1638 result
= extract_unsigned_integer (op_ptr
, 8, byte_order
);
1639 result_val
= value_from_ulongest (address_type
, result
);
1643 result
= extract_signed_integer (op_ptr
, 8, byte_order
);
1644 result_val
= value_from_ulongest (address_type
, result
);
1648 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1650 result_val
= value_from_ulongest (address_type
, result
);
1653 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
1655 result_val
= value_from_ulongest (address_type
, result
);
	  /* The DW_OP_reg operations are required to occur alone in
	     location expressions.  */
1692 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_reg");
1694 result
= op
- DW_OP_reg0
;
1695 result_val
= value_from_ulongest (address_type
, result
);
1696 this->m_location
= DWARF_VALUE_REGISTER
;
1700 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
1701 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_regx");
1704 result_val
= value_from_ulongest (address_type
, result
);
1705 this->m_location
= DWARF_VALUE_REGISTER
;
1708 case DW_OP_implicit_value
:
1712 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &len
);
1713 if (op_ptr
+ len
> op_end
)
1714 error (_("DW_OP_implicit_value: too few bytes available."));
1716 this->m_data
= op_ptr
;
1717 this->m_location
= DWARF_VALUE_LITERAL
;
1719 dwarf_expr_require_composition (op_ptr
, op_end
,
1720 "DW_OP_implicit_value");
1724 case DW_OP_stack_value
:
1725 this->m_location
= DWARF_VALUE_STACK
;
1726 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_stack_value");
1729 case DW_OP_implicit_pointer
:
1730 case DW_OP_GNU_implicit_pointer
:
1733 ensure_have_per_cu (this->m_per_cu
, "DW_OP_implicit_pointer");
1735 int ref_addr_size
= this->m_per_cu
->ref_addr_size ();
	    /* The referred-to DIE of sect_offset kind.  */
1738 this->m_len
= extract_unsigned_integer (op_ptr
, ref_addr_size
,
1740 op_ptr
+= ref_addr_size
;
	    /* The byte offset into the data.  */
1743 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &len
);
1744 result
= (ULONGEST
) len
;
1745 result_val
= value_from_ulongest (address_type
, result
);
1747 this->m_location
= DWARF_VALUE_IMPLICIT_POINTER
;
1748 dwarf_expr_require_composition (op_ptr
, op_end
,
1749 "DW_OP_implicit_pointer");
1786 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
1787 ensure_have_frame (this->m_frame
, "DW_OP_breg");
1789 result
= read_addr_from_reg (this->m_frame
, op
- DW_OP_breg0
);
1791 result_val
= value_from_ulongest (address_type
, result
);
1796 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
1797 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
1798 ensure_have_frame (this->m_frame
, "DW_OP_bregx");
1800 result
= read_addr_from_reg (this->m_frame
, reg
);
1802 result_val
= value_from_ulongest (address_type
, result
);
1807 const gdb_byte
*datastart
;
1810 op_ptr
= safe_read_sleb128 (op_ptr
, op_end
, &offset
);
	    /* Rather than create a whole new context, we simply
	       backup the current stack locally and install a new empty stack,
	       then reset it afterwards, effectively erasing whatever the
	       recursive call put there.  */
1816 std::vector
<dwarf_stack_value
> saved_stack
= std::move (this->m_stack
);
1817 this->m_stack
.clear ();
	    /* FIXME: cagney/2003-03-26: This code should be using
	       get_frame_base_address(), and then implement a dwarf2
	       specific this_base method.  */
1822 this->get_frame_base (&datastart
, &datalen
);
1823 eval (datastart
, datalen
);
1824 if (this->m_location
== DWARF_VALUE_MEMORY
)
1825 result
= fetch_address (0);
1826 else if (this->m_location
== DWARF_VALUE_REGISTER
)
1828 = read_addr_from_reg (this->m_frame
, value_as_long (fetch (0)));
1830 error (_("Not implemented: computing frame "
1831 "base using explicit value operator"));
1832 result
= result
+ offset
;
1833 result_val
= value_from_ulongest (address_type
, result
);
1834 in_stack_memory
= true;
	    /* Restore the content of the original stack.  */
1837 this->m_stack
= std::move (saved_stack
);
1839 this->m_location
= DWARF_VALUE_MEMORY
;
1844 result_val
= fetch (0);
1845 in_stack_memory
= fetch_in_stack_memory (0);
1854 result_val
= fetch (offset
);
1855 in_stack_memory
= fetch_in_stack_memory (offset
);
1860 if (this->m_stack
.size () < 2)
1861 error (_("Not enough elements for "
1862 "DW_OP_swap. Need 2, have %zu."),
1863 this->m_stack
.size ());
1865 dwarf_stack_value
&t1
= this->m_stack
[this->m_stack
.size () - 1];
1866 dwarf_stack_value
&t2
= this->m_stack
[this->m_stack
.size () - 2];
1872 result_val
= fetch (1);
1873 in_stack_memory
= fetch_in_stack_memory (1);
1878 if (this->m_stack
.size () < 3)
1879 error (_("Not enough elements for "
1880 "DW_OP_rot. Need 3, have %zu."),
1881 this->m_stack
.size ());
1883 dwarf_stack_value temp
= this->m_stack
[this->m_stack
.size () - 1];
1884 this->m_stack
[this->m_stack
.size () - 1]
1885 = this->m_stack
[this->m_stack
.size () - 2];
1886 this->m_stack
[this->m_stack
.size () - 2]
1887 = this->m_stack
[this->m_stack
.size () - 3];
1888 this->m_stack
[this->m_stack
.size () - 3] = temp
;
1893 case DW_OP_deref_size
:
1894 case DW_OP_deref_type
:
1895 case DW_OP_GNU_deref_type
:
1897 int addr_size
= (op
== DW_OP_deref
? this->m_addr_size
: *op_ptr
++);
1898 gdb_byte
*buf
= (gdb_byte
*) alloca (addr_size
);
1899 CORE_ADDR addr
= fetch_address (0);
1904 if (op
== DW_OP_deref_type
|| op
== DW_OP_GNU_deref_type
)
1906 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
1907 cu_offset type_die_cu_off
= (cu_offset
) uoffset
;
1908 type
= get_base_type (type_die_cu_off
);
1911 type
= address_type
;
1913 this->read_mem (buf
, addr
, addr_size
);
	    /* If the size of the object read from memory is different
	       from the type length, we need to zero-extend it.  */
1917 if (type
->length () != addr_size
)
1920 extract_unsigned_integer (buf
, addr_size
, byte_order
);
1922 buf
= (gdb_byte
*) alloca (type
->length ());
1923 store_unsigned_integer (buf
, type
->length (),
1927 result_val
= value_from_contents_and_address (type
, buf
, addr
);
1934 case DW_OP_plus_uconst
:
	  /* Unary operations.  */
1937 result_val
= fetch (0);
1943 if (value_less (result_val
,
1944 value::zero (result_val
->type (), not_lval
)))
1945 result_val
= value_neg (result_val
);
1948 result_val
= value_neg (result_val
);
1951 dwarf_require_integral (result_val
->type ());
1952 result_val
= value_complement (result_val
);
1954 case DW_OP_plus_uconst
:
1955 dwarf_require_integral (result_val
->type ());
1956 result
= value_as_long (result_val
);
1957 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
1959 result_val
= value_from_ulongest (address_type
, result
);
	    /* Binary operations.  */
1984 struct value
*first
, *second
;
1992 if (! base_types_equal_p (first
->type (), second
->type ()))
1993 error (_("Incompatible types on DWARF stack"));
1998 dwarf_require_integral (first
->type ());
1999 dwarf_require_integral (second
->type ());
2000 result_val
= value_binop (first
, second
, BINOP_BITWISE_AND
);
2003 result_val
= value_binop (first
, second
, BINOP_DIV
);
2006 result_val
= value_binop (first
, second
, BINOP_SUB
);
2011 struct type
*orig_type
= first
->type ();
		/* We have to special-case "old-style" untyped values
		   -- these must have mod computed using unsigned
		   math.  */
2016 if (orig_type
== address_type
)
2018 struct type
*utype
= get_unsigned_type (arch
, orig_type
);
2021 first
= value_cast (utype
, first
);
2022 second
= value_cast (utype
, second
);
		/* Note that value_binop doesn't handle float or
		   decimal float here.  This seems unimportant.  */
2026 result_val
= value_binop (first
, second
, BINOP_MOD
);
2028 result_val
= value_cast (orig_type
, result_val
);
2032 result_val
= value_binop (first
, second
, BINOP_MUL
);
2035 dwarf_require_integral (first
->type ());
2036 dwarf_require_integral (second
->type ());
2037 result_val
= value_binop (first
, second
, BINOP_BITWISE_IOR
);
2040 result_val
= value_binop (first
, second
, BINOP_ADD
);
2043 dwarf_require_integral (first
->type ());
2044 dwarf_require_integral (second
->type ());
2045 result_val
= value_binop (first
, second
, BINOP_LSH
);
2048 dwarf_require_integral (first
->type ());
2049 dwarf_require_integral (second
->type ());
2050 if (!first
->type ()->is_unsigned ())
2053 = get_unsigned_type (arch
, first
->type ());
2055 first
= value_cast (utype
, first
);
2058 result_val
= value_binop (first
, second
, BINOP_RSH
);
		/* Make sure we wind up with the same type we started
		   with.  */
		if (result_val
->type () != second
->type ())
2062 result_val
= value_cast (second
->type (), result_val
);
2065 dwarf_require_integral (first
->type ());
2066 dwarf_require_integral (second
->type ());
2067 if (first
->type ()->is_unsigned ())
2070 = get_signed_type (arch
, first
->type ());
2072 first
= value_cast (stype
, first
);
2075 result_val
= value_binop (first
, second
, BINOP_RSH
);
		/* Make sure we wind up with the same type we started
		   with.  */
		if (result_val
->type () != second
->type ())
2079 result_val
= value_cast (second
->type (), result_val
);
2082 dwarf_require_integral (first
->type ());
2083 dwarf_require_integral (second
->type ());
2084 result_val
= value_binop (first
, second
, BINOP_BITWISE_XOR
);
		/* A <= B is !(B < A).  */
2088 result
= ! value_less (second
, first
);
2089 result_val
= value_from_ulongest (address_type
, result
);
		/* A >= B is !(A < B).  */
2093 result
= ! value_less (first
, second
);
2094 result_val
= value_from_ulongest (address_type
, result
);
2097 result
= value_equal (first
, second
);
2098 result_val
= value_from_ulongest (address_type
, result
);
2101 result
= value_less (first
, second
);
2102 result_val
= value_from_ulongest (address_type
, result
);
		/* A > B is B < A.  */
2106 result
= value_less (second
, first
);
2107 result_val
= value_from_ulongest (address_type
, result
);
2110 result
= ! value_equal (first
, second
);
2111 result_val
= value_from_ulongest (address_type
, result
);
2114 internal_error (_("Can't be reached."));
2119 case DW_OP_call_frame_cfa
:
2120 ensure_have_frame (this->m_frame
, "DW_OP_call_frame_cfa");
2122 result
= dwarf2_frame_cfa (this->m_frame
);
2123 result_val
= value_from_ulongest (address_type
, result
);
2124 in_stack_memory
= true;
2127 case DW_OP_GNU_push_tls_address
:
2128 case DW_OP_form_tls_address
:
	  /* Variable is at a constant offset in the thread-local
	     storage block into the objfile for the current thread and
	     the dynamic linker module containing this expression.  Here
	     we return the offset from that base.  The top of the
	     stack has the offset from the beginning of the thread
	     control block at which the variable is located.  Nothing
	     should follow this operator, so the top of stack would be
	     returned.  */
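	  /* As the DW_OP_addr note above says, a typical encoding is
	       DW_OP_addr <index> DW_OP_GNU_push_tls_address
	     where the operand is really an offset into the module's TLS
	     block rather than an address; target_translate_tls_address
	     below maps it to an address in the current thread.
	     (Illustrative description of the common GCC output.)  */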
2137 result
= value_as_long (fetch (0));
2139 result
= target_translate_tls_address (this->m_per_objfile
->objfile
,
2141 result_val
= value_from_ulongest (address_type
, result
);
2145 offset
= extract_signed_integer (op_ptr
, 2, byte_order
);
2154 offset
= extract_signed_integer (op_ptr
, 2, byte_order
);
2157 dwarf_require_integral (val
->type ());
2158 if (value_as_long (val
) != 0)
	    /* Record the piece.  */
2172 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &size
);
2173 add_piece (8 * size
, 0);
	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (this->m_location
!= DWARF_VALUE_LITERAL
2178 && this->m_location
!= DWARF_VALUE_OPTIMIZED_OUT
)
2180 this->m_location
= DWARF_VALUE_MEMORY
;
2184 case DW_OP_bit_piece
:
2186 uint64_t size
, uleb_offset
;
	    /* Record the piece.  */
2189 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &size
);
2190 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uleb_offset
);
2191 add_piece (size
, uleb_offset
);
	    /* Pop off the address/regnum, and reset the location
	       type.  */
	    if (this->m_location
!= DWARF_VALUE_LITERAL
2196 && this->m_location
!= DWARF_VALUE_OPTIMIZED_OUT
)
2198 this->m_location
= DWARF_VALUE_MEMORY
;
2202 case DW_OP_GNU_uninit
:
2203 dwarf_expr_require_composition (op_ptr
, op_end
, "DW_OP_GNU_uninit");
2204 this->m_initialized
= false;
2210 = (cu_offset
) extract_unsigned_integer (op_ptr
, 2, byte_order
);
2212 this->dwarf_call (cu_off
);
2219 = (cu_offset
) extract_unsigned_integer (op_ptr
, 4, byte_order
);
2221 this->dwarf_call (cu_off
);
2225 case DW_OP_GNU_variable_value
:
2227 ensure_have_per_cu (this->m_per_cu
, "DW_OP_GNU_variable_value");
2228 int ref_addr_size
= this->m_per_cu
->ref_addr_size ();
2230 sect_offset sect_off
2231 = (sect_offset
) extract_unsigned_integer (op_ptr
,
2234 op_ptr
+= ref_addr_size
;
2235 result_val
= sect_variable_value (sect_off
, this->m_per_cu
,
2236 this->m_per_objfile
);
2237 result_val
= value_cast (address_type
, result_val
);
2241 case DW_OP_entry_value
:
2242 case DW_OP_GNU_entry_value
:
2245 CORE_ADDR deref_size
;
2246 union call_site_parameter_u kind_u
;
2248 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &len
);
2249 if (op_ptr
+ len
> op_end
)
2250 error (_("DW_OP_entry_value: too few bytes available."));
2252 kind_u
.dwarf_reg
= dwarf_block_to_dwarf_reg (op_ptr
, op_ptr
+ len
);
2253 if (kind_u
.dwarf_reg
!= -1)
2256 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG
,
2258 -1 /* deref_size */);
2262 kind_u
.dwarf_reg
= dwarf_block_to_dwarf_reg_deref (op_ptr
,
2265 if (kind_u
.dwarf_reg
!= -1)
2267 if (deref_size
== -1)
2268 deref_size
= this->m_addr_size
;
2270 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG
,
2271 kind_u
, deref_size
);
2275 error (_("DWARF-2 expression error: DW_OP_entry_value is "
2276 "supported only for single DW_OP_reg* "
2277 "or for DW_OP_breg*(0)+DW_OP_deref*"));
2280 case DW_OP_GNU_parameter_ref
:
2282 union call_site_parameter_u kind_u
;
2285 = (cu_offset
) extract_unsigned_integer (op_ptr
, 4, byte_order
);
2287 this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET
,
2289 -1 /* deref_size */);
2293 case DW_OP_const_type
:
2294 case DW_OP_GNU_const_type
:
2297 const gdb_byte
*data
;
2300 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
2301 cu_offset type_die_cu_off
= (cu_offset
) uoffset
;
2307 type
= get_base_type (type_die_cu_off
);
2309 if (type
->length () != n
)
2310 error (_("DW_OP_const_type has different sizes for type and data"));
2312 result_val
= value_from_contents (type
, data
);
2316 case DW_OP_regval_type
:
2317 case DW_OP_GNU_regval_type
:
2319 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, ®
);
2320 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
2321 cu_offset type_die_cu_off
= (cu_offset
) uoffset
;
2323 ensure_have_frame (this->m_frame
, "DW_OP_regval_type");
2325 struct type
*type
= get_base_type (type_die_cu_off
);
2327 = dwarf_reg_to_regnum_or_error (get_frame_arch (this->m_frame
),
2329 result_val
= value_from_register (type
, regnum
, this->m_frame
);
2334 case DW_OP_GNU_convert
:
2335 case DW_OP_reinterpret
:
2336 case DW_OP_GNU_reinterpret
:
2340 op_ptr
= safe_read_uleb128 (op_ptr
, op_end
, &uoffset
);
2341 cu_offset type_die_cu_off
= (cu_offset
) uoffset
;
2343 if (to_underlying (type_die_cu_off
) == 0)
2344 type
= address_type
;
2346 type
= get_base_type (type_die_cu_off
);
2348 result_val
= fetch (0);
2351 if (op
== DW_OP_convert
|| op
== DW_OP_GNU_convert
)
2352 result_val
= value_cast (type
, result_val
);
2353 else if (type
== result_val
->type ())
2357 else if (type
->length ()
2358 != result_val
->type ()->length ())
2359 error (_("DW_OP_reinterpret has wrong size"));
2362 = value_from_contents (type
,
2363 result_val
->contents_all ().data ());
2367 case DW_OP_push_object_address
:
2368 /* Return the address of the object we are currently observing. */
2369 if (this->m_addr_info
== nullptr
2370 || (this->m_addr_info
->valaddr
.data () == nullptr
2371 && this->m_addr_info
->addr
== 0))
2372 error (_("Location address is not set."));
2375 = value_from_ulongest (address_type
, this->m_addr_info
->addr
);
2379 error (_("Unhandled dwarf expression opcode 0x%x"), op
);
      /* Most things push a result value.  */
2383 gdb_assert (result_val
!= NULL
);
2384 push (result_val
, in_stack_memory
);
  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
2392 if (this->m_location
== DWARF_VALUE_IMPLICIT_POINTER
)
2393 add_piece (8 * this->m_addr_size
, 0);
2395 this->m_recursion_depth
--;
2396 gdb_assert (this->m_recursion_depth
>= 0);