gdb/dwarf2/expr.c
1 /* DWARF 2 Expression Evaluator.
3 Copyright (C) 2001-2022 Free Software Foundation, Inc.
5 Contributed by Daniel Berlin (dan@dberlin.org)
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22 #include "defs.h"
23 #include "block.h"
24 #include "symtab.h"
25 #include "gdbtypes.h"
26 #include "value.h"
27 #include "gdbcore.h"
28 #include "dwarf2.h"
29 #include "dwarf2/expr.h"
30 #include "dwarf2/loc.h"
31 #include "dwarf2/read.h"
32 #include "frame.h"
33 #include "gdbsupport/underlying.h"
34 #include "gdbarch.h"
35 #include "objfiles.h"
37 /* Cookie for gdbarch data. */
39 static struct gdbarch_data *dwarf_arch_cookie;
41 /* This holds gdbarch-specific types used by the DWARF expression
42 evaluator. See comments in execute_stack_op. */
44 struct dwarf_gdbarch_types
46 struct type *dw_types[3];
49 /* Allocate and fill in dwarf_gdbarch_types for an arch. */
51 static void *
52 dwarf_gdbarch_types_init (struct gdbarch *gdbarch)
54 struct dwarf_gdbarch_types *types
55 = GDBARCH_OBSTACK_ZALLOC (gdbarch, struct dwarf_gdbarch_types);
57 /* The types themselves are lazily initialized. */
59 return types;
62 /* Ensure that a FRAME is defined, throw an exception otherwise. */
64 static void
65 ensure_have_frame (frame_info *frame, const char *op_name)
67 if (frame == nullptr)
68 throw_error (GENERIC_ERROR,
69 _("%s evaluation requires a frame."), op_name);
72 /* Ensure that a PER_CU is defined and throw an exception otherwise. */
74 static void
75 ensure_have_per_cu (dwarf2_per_cu_data *per_cu, const char* op_name)
77 if (per_cu == nullptr)
78 throw_error (GENERIC_ERROR,
79 _("%s evaluation requires a compilation unit."), op_name);
82 /* Return the number of bytes overlapping a contiguous chunk of N_BITS
83 bits whose first bit is located at bit offset START. */
85 static size_t
86 bits_to_bytes (ULONGEST start, ULONGEST n_bits)
88 return (start % HOST_CHAR_BIT + n_bits + HOST_CHAR_BIT - 1) / HOST_CHAR_BIT;
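/* Illustrative worked example (editorial note, not part of the original
   source): a chunk of N_BITS = 10 starting at START = 6 occupies bits
   6..15 and therefore straddles two host bytes, and indeed
   bits_to_bytes (6, 10) == (6 % 8 + 10 + 8 - 1) / 8 == 2.  A chunk that
   begins on a byte boundary, bits_to_bytes (8, 16), likewise yields
   (0 + 16 + 7) / 8 == 2.  */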
91 /* See expr.h. */
93 CORE_ADDR
94 read_addr_from_reg (frame_info *frame, int reg)
96 struct gdbarch *gdbarch = get_frame_arch (frame);
97 int regnum = dwarf_reg_to_regnum_or_error (gdbarch, reg);
99 return address_from_register (regnum, frame);
102 struct piece_closure
104 /* Reference count. */
105 int refc = 0;
107 /* The objfile from which this closure's expression came. */
108 dwarf2_per_objfile *per_objfile = nullptr;
110 /* The CU from which this closure's expression came. */
111 dwarf2_per_cu_data *per_cu = nullptr;
113 /* The pieces describing this variable. */
114 std::vector<dwarf_expr_piece> pieces;
116 /* Frame ID of frame to which a register value is relative, used
117 only by DWARF_VALUE_REGISTER. */
118 struct frame_id frame_id;
121 /* Allocate a closure for a value formed from separately-described
122 PIECES. */
124 static piece_closure *
125 allocate_piece_closure (dwarf2_per_cu_data *per_cu,
126 dwarf2_per_objfile *per_objfile,
127 std::vector<dwarf_expr_piece> &&pieces,
128 frame_info *frame)
130 piece_closure *c = new piece_closure;
132 c->refc = 1;
133 /* We must capture this here due to sharing of DWARF state. */
134 c->per_objfile = per_objfile;
135 c->per_cu = per_cu;
136 c->pieces = std::move (pieces);
137 if (frame == nullptr)
138 c->frame_id = null_frame_id;
139 else
140 c->frame_id = get_frame_id (frame);
142 for (dwarf_expr_piece &piece : c->pieces)
143 if (piece.location == DWARF_VALUE_STACK)
144 value_incref (piece.v.value);
146 return c;
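/* Illustrative example (editorial note, not part of the original source):
   for a variable described by the location expression

       DW_OP_reg3, DW_OP_piece 4, DW_OP_fbreg -16, DW_OP_piece 4

   the resulting piece_closure holds two dwarf_expr_piece entries: a
   32-bit DWARF_VALUE_REGISTER piece referring to DWARF register 3 and a
   32-bit DWARF_VALUE_MEMORY piece at frame base - 16 (DW_OP_piece takes
   its operand in bytes).  rw_pieced_value below stitches the pieces
   together whenever the composed value is read or written.  */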
149 /* Read or write a pieced value V. If FROM != NULL, operate in "write
150 mode": copy FROM into the pieces comprising V. If FROM == NULL,
151 operate in "read mode": fetch the contents of the (lazy) value V by
152 composing it from its pieces. If CHECK_OPTIMIZED is true, then no
153 reading or writing is done; instead the return value of this
154 function is true if any piece is optimized out. When
155 CHECK_OPTIMIZED is true, FROM must be nullptr. */
157 static bool
158 rw_pieced_value (value *v, value *from, bool check_optimized)
160 int i;
161 LONGEST offset = 0, max_offset;
162 gdb_byte *v_contents;
163 const gdb_byte *from_contents;
164 piece_closure *c
165 = (piece_closure *) value_computed_closure (v);
166 gdb::byte_vector buffer;
167 bool bits_big_endian = type_byte_order (value_type (v)) == BFD_ENDIAN_BIG;
169 gdb_assert (!check_optimized || from == nullptr);
170 if (from != nullptr)
172 from_contents = value_contents (from).data ();
173 v_contents = nullptr;
175 else
177 if (value_type (v) != value_enclosing_type (v))
178 internal_error (__FILE__, __LINE__,
179 _("Should not be able to create a lazy value with "
180 "an enclosing type"));
181 if (check_optimized)
182 v_contents = nullptr;
183 else
184 v_contents = value_contents_raw (v).data ();
185 from_contents = nullptr;
188 ULONGEST bits_to_skip = 8 * value_offset (v);
189 if (value_bitsize (v))
191 bits_to_skip += (8 * value_offset (value_parent (v))
192 + value_bitpos (v));
193 if (from != nullptr
194 && (type_byte_order (value_type (from))
195 == BFD_ENDIAN_BIG))
197 /* Use the least significant bits of FROM. */
198 max_offset = 8 * TYPE_LENGTH (value_type (from));
199 offset = max_offset - value_bitsize (v);
201 else
202 max_offset = value_bitsize (v);
204 else
205 max_offset = 8 * TYPE_LENGTH (value_type (v));
207 /* Advance to the first non-skipped piece. */
208 for (i = 0; i < c->pieces.size () && bits_to_skip >= c->pieces[i].size; i++)
209 bits_to_skip -= c->pieces[i].size;
211 for (; i < c->pieces.size () && offset < max_offset; i++)
213 dwarf_expr_piece *p = &c->pieces[i];
214 size_t this_size_bits, this_size;
216 this_size_bits = p->size - bits_to_skip;
217 if (this_size_bits > max_offset - offset)
218 this_size_bits = max_offset - offset;
220 switch (p->location)
222 case DWARF_VALUE_REGISTER:
224 frame_info *frame = frame_find_by_id (c->frame_id);
225 gdbarch *arch = get_frame_arch (frame);
226 int gdb_regnum = dwarf_reg_to_regnum_or_error (arch, p->v.regno);
227 ULONGEST reg_bits = 8 * register_size (arch, gdb_regnum);
228 int optim, unavail;
230 if (gdbarch_byte_order (arch) == BFD_ENDIAN_BIG
231 && p->offset + p->size < reg_bits)
233 /* Big-endian, and we want less than full size. */
234 bits_to_skip += reg_bits - (p->offset + p->size);
236 else
237 bits_to_skip += p->offset;
239 this_size = bits_to_bytes (bits_to_skip, this_size_bits);
240 buffer.resize (this_size);
242 if (from == nullptr)
244 /* Read mode. */
245 if (!get_frame_register_bytes (frame, gdb_regnum,
246 bits_to_skip / 8,
247 buffer, &optim, &unavail))
249 if (optim)
251 if (check_optimized)
252 return true;
253 mark_value_bits_optimized_out (v, offset,
254 this_size_bits);
256 if (unavail && !check_optimized)
257 mark_value_bits_unavailable (v, offset,
258 this_size_bits);
259 break;
262 if (!check_optimized)
263 copy_bitwise (v_contents, offset,
264 buffer.data (), bits_to_skip % 8,
265 this_size_bits, bits_big_endian);
267 else
269 /* Write mode. */
270 if (bits_to_skip % 8 != 0 || this_size_bits % 8 != 0)
272 /* Data is copied non-byte-aligned into the register.
273 Need some bits from original register value. */
274 get_frame_register_bytes (frame, gdb_regnum,
275 bits_to_skip / 8,
276 buffer, &optim, &unavail);
277 if (optim)
278 throw_error (OPTIMIZED_OUT_ERROR,
279 _("Can't do read-modify-write to "
280 "update bitfield; containing word "
281 "has been optimized out"));
282 if (unavail)
283 throw_error (NOT_AVAILABLE_ERROR,
284 _("Can't do read-modify-write to "
285 "update bitfield; containing word "
286 "is unavailable"));
289 copy_bitwise (buffer.data (), bits_to_skip % 8,
290 from_contents, offset,
291 this_size_bits, bits_big_endian);
292 put_frame_register_bytes (frame, gdb_regnum,
293 bits_to_skip / 8,
294 buffer);
297 break;
299 case DWARF_VALUE_MEMORY:
301 if (check_optimized)
302 break;
304 bits_to_skip += p->offset;
306 CORE_ADDR start_addr = p->v.mem.addr + bits_to_skip / 8;
308 if (bits_to_skip % 8 == 0 && this_size_bits % 8 == 0
309 && offset % 8 == 0)
311 /* Everything is byte-aligned; no buffer needed. */
312 if (from != nullptr)
313 write_memory_with_notification (start_addr,
314 (from_contents
315 + offset / 8),
316 this_size_bits / 8);
317 else
318 read_value_memory (v, offset,
319 p->v.mem.in_stack_memory,
320 p->v.mem.addr + bits_to_skip / 8,
321 v_contents + offset / 8,
322 this_size_bits / 8);
323 break;
326 this_size = bits_to_bytes (bits_to_skip, this_size_bits);
327 buffer.resize (this_size);
329 if (from == nullptr)
331 /* Read mode. */
332 read_value_memory (v, offset,
333 p->v.mem.in_stack_memory,
334 p->v.mem.addr + bits_to_skip / 8,
335 buffer.data (), this_size);
336 copy_bitwise (v_contents, offset,
337 buffer.data (), bits_to_skip % 8,
338 this_size_bits, bits_big_endian);
340 else
342 /* Write mode. */
343 if (bits_to_skip % 8 != 0 || this_size_bits % 8 != 0)
345 if (this_size <= 8)
347 /* Perform a single read for small sizes. */
348 read_memory (start_addr, buffer.data (),
349 this_size);
351 else
353 /* Only the first and last bytes can possibly have
354 any bits reused. */
355 read_memory (start_addr, buffer.data (), 1);
356 read_memory (start_addr + this_size - 1,
357 &buffer[this_size - 1], 1);
361 copy_bitwise (buffer.data (), bits_to_skip % 8,
362 from_contents, offset,
363 this_size_bits, bits_big_endian);
364 write_memory_with_notification (start_addr,
365 buffer.data (),
366 this_size);
369 break;
371 case DWARF_VALUE_STACK:
373 if (check_optimized)
374 break;
376 if (from != nullptr)
378 mark_value_bits_optimized_out (v, offset, this_size_bits);
379 break;
382 gdbarch *objfile_gdbarch = c->per_objfile->objfile->arch ();
383 ULONGEST stack_value_size_bits
384 = 8 * TYPE_LENGTH (value_type (p->v.value));
386 /* Use zeroes if piece reaches beyond stack value. */
387 if (p->offset + p->size > stack_value_size_bits)
388 break;
390 /* The piece is anchored at the least significant bit end. */
391 if (gdbarch_byte_order (objfile_gdbarch) == BFD_ENDIAN_BIG)
392 bits_to_skip += stack_value_size_bits - p->offset - p->size;
393 else
394 bits_to_skip += p->offset;
396 copy_bitwise (v_contents, offset,
397 value_contents_all (p->v.value).data (),
398 bits_to_skip,
399 this_size_bits, bits_big_endian);
401 break;
403 case DWARF_VALUE_LITERAL:
405 if (check_optimized)
406 break;
408 if (from != nullptr)
410 mark_value_bits_optimized_out (v, offset, this_size_bits);
411 break;
414 ULONGEST literal_size_bits = 8 * p->v.literal.length;
415 size_t n = this_size_bits;
417 /* Cut off at the end of the implicit value. */
418 bits_to_skip += p->offset;
419 if (bits_to_skip >= literal_size_bits)
420 break;
421 if (n > literal_size_bits - bits_to_skip)
422 n = literal_size_bits - bits_to_skip;
424 copy_bitwise (v_contents, offset,
425 p->v.literal.data, bits_to_skip,
426 n, bits_big_endian);
428 break;
430 case DWARF_VALUE_IMPLICIT_POINTER:
431 if (from != nullptr)
433 mark_value_bits_optimized_out (v, offset, this_size_bits);
434 break;
437 /* These bits show up as zeros -- but do not cause the value to
438 be considered optimized-out. */
439 break;
441 case DWARF_VALUE_OPTIMIZED_OUT:
442 if (check_optimized)
443 return true;
444 mark_value_bits_optimized_out (v, offset, this_size_bits);
445 break;
447 default:
448 internal_error (__FILE__, __LINE__, _("invalid location type"));
451 offset += this_size_bits;
452 bits_to_skip = 0;
455 return false;
458 static void
459 read_pieced_value (value *v)
461 rw_pieced_value (v, nullptr, false);
464 static void
465 write_pieced_value (value *to, value *from)
467 rw_pieced_value (to, from, false);
470 static bool
471 is_optimized_out_pieced_value (value *v)
473 return rw_pieced_value (v, nullptr, true);
476 /* An implementation of an lval_funcs method to see whether a value is
477 a synthetic pointer. */
479 static int
480 check_pieced_synthetic_pointer (const value *value, LONGEST bit_offset,
481 int bit_length)
483 piece_closure *c = (piece_closure *) value_computed_closure (value);
484 int i;
486 bit_offset += 8 * value_offset (value);
487 if (value_bitsize (value))
488 bit_offset += value_bitpos (value);
490 for (i = 0; i < c->pieces.size () && bit_length > 0; i++)
492 dwarf_expr_piece *p = &c->pieces[i];
493 size_t this_size_bits = p->size;
495 if (bit_offset > 0)
497 if (bit_offset >= this_size_bits)
499 bit_offset -= this_size_bits;
500 continue;
503 bit_length -= this_size_bits - bit_offset;
504 bit_offset = 0;
506 else
507 bit_length -= this_size_bits;
509 if (p->location != DWARF_VALUE_IMPLICIT_POINTER)
510 return 0;
513 return 1;
516 /* An implementation of an lval_funcs method to indirect through a
517 pointer. This handles the synthetic pointer case when needed. */
519 static value *
520 indirect_pieced_value (value *value)
522 piece_closure *c
523 = (piece_closure *) value_computed_closure (value);
524 int i;
525 dwarf_expr_piece *piece = NULL;
527 struct type *type = check_typedef (value_type (value));
528 if (type->code () != TYPE_CODE_PTR)
529 return NULL;
531 int bit_length = 8 * TYPE_LENGTH (type);
532 LONGEST bit_offset = 8 * value_offset (value);
533 if (value_bitsize (value))
534 bit_offset += value_bitpos (value);
536 for (i = 0; i < c->pieces.size () && bit_length > 0; i++)
538 dwarf_expr_piece *p = &c->pieces[i];
539 size_t this_size_bits = p->size;
541 if (bit_offset > 0)
543 if (bit_offset >= this_size_bits)
545 bit_offset -= this_size_bits;
546 continue;
549 bit_length -= this_size_bits - bit_offset;
550 bit_offset = 0;
552 else
553 bit_length -= this_size_bits;
555 if (p->location != DWARF_VALUE_IMPLICIT_POINTER)
556 return NULL;
558 if (bit_length != 0)
559 error (_("Invalid use of DW_OP_implicit_pointer"));
561 piece = p;
562 break;
565 gdb_assert (piece != NULL && c->per_cu != nullptr);
566 frame_info *frame = get_selected_frame (_("No frame selected."));
568 /* This is an offset requested by GDB, such as value subscripts.
569 However, due to how synthetic pointers are implemented, this is
570 always presented to us as a pointer type. This means we have to
571 sign-extend it manually as appropriate. Use raw
572 extract_signed_integer directly rather than value_as_address and
573 sign extend afterwards on architectures that would need it
574 (mostly everywhere except MIPS, which has signed addresses) as
575 the latter would go through gdbarch_pointer_to_address and thus
576 return a CORE_ADDR with high bits set on architectures that
577 encode address spaces and other things in CORE_ADDR. */
578 bfd_endian byte_order = gdbarch_byte_order (get_frame_arch (frame));
579 LONGEST byte_offset
580 = extract_signed_integer (value_contents (value), byte_order);
581 byte_offset += piece->v.ptr.offset;
583 return indirect_synthetic_pointer (piece->v.ptr.die_sect_off,
584 byte_offset, c->per_cu,
585 c->per_objfile, frame, type);
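/* Illustrative example of the sign-extension concern above (editorial
   note): on a 32-bit target, if the pointer-typed value holds the byte
   pattern for 0xfffffffc, extract_signed_integer yields BYTE_OFFSET == -4,
   which is then added to the DW_OP_implicit_pointer offset.  Going
   through value_as_address instead could hand back 0xfffffffc (or an
   address with target-specific high bits set), corrupting that sum.  */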
588 /* Implementation of the coerce_ref method of lval_funcs for synthetic C++
589 references. */
591 static value *
592 coerce_pieced_ref (const value *value)
594 struct type *type = check_typedef (value_type (value));
596 if (value_bits_synthetic_pointer (value, value_embedded_offset (value),
597 TARGET_CHAR_BIT * TYPE_LENGTH (type)))
599 const piece_closure *closure
600 = (piece_closure *) value_computed_closure (value);
601 frame_info *frame
602 = get_selected_frame (_("No frame selected."));
604 /* gdb represents synthetic pointers as pieced values with a single
605 piece. */
606 gdb_assert (closure != NULL);
607 gdb_assert (closure->pieces.size () == 1);
609 return indirect_synthetic_pointer
610 (closure->pieces[0].v.ptr.die_sect_off,
611 closure->pieces[0].v.ptr.offset,
612 closure->per_cu, closure->per_objfile, frame, type);
614 else
616 /* Else: not a synthetic reference; do nothing. */
617 return NULL;
621 static void *
622 copy_pieced_value_closure (const value *v)
624 piece_closure *c = (piece_closure *) value_computed_closure (v);
626 ++c->refc;
627 return c;
630 static void
631 free_pieced_value_closure (value *v)
633 piece_closure *c = (piece_closure *) value_computed_closure (v);
635 --c->refc;
636 if (c->refc == 0)
638 for (dwarf_expr_piece &p : c->pieces)
639 if (p.location == DWARF_VALUE_STACK)
640 value_decref (p.v.value);
642 delete c;
646 /* Functions for accessing a variable described by DW_OP_piece. */
647 static const struct lval_funcs pieced_value_funcs = {
648 read_pieced_value,
649 write_pieced_value,
650 is_optimized_out_pieced_value,
651 indirect_pieced_value,
652 coerce_pieced_ref,
653 check_pieced_synthetic_pointer,
654 copy_pieced_value_closure,
655 free_pieced_value_closure
658 /* Given section offset SECT_OFF, compilation unit data PER_CU, and
659 per-objfile data PER_OBJFILE, execute the "variable value" operation on the DIE
660 found at SECT_OFF. */
662 static value *
663 sect_variable_value (sect_offset sect_off,
664 dwarf2_per_cu_data *per_cu,
665 dwarf2_per_objfile *per_objfile)
667 const char *var_name = nullptr;
668 struct type *die_type
669 = dwarf2_fetch_die_type_sect_off (sect_off, per_cu, per_objfile,
670 &var_name);
672 if (die_type == NULL)
673 error (_("Bad DW_OP_GNU_variable_value DIE."));
675 /* Note: Things still work when the following test is removed. This
676 test and error are here to conform to the proposed specification. */
677 if (die_type->code () != TYPE_CODE_INT
678 && die_type->code () != TYPE_CODE_ENUM
679 && die_type->code () != TYPE_CODE_RANGE
680 && die_type->code () != TYPE_CODE_PTR)
681 error (_("Type of DW_OP_GNU_variable_value DIE must be an integer or pointer."));
683 if (var_name != nullptr)
685 value *result = compute_var_value (var_name);
686 if (result != nullptr)
687 return result;
690 struct type *type = lookup_pointer_type (die_type);
691 frame_info *frame = get_selected_frame (_("No frame selected."));
692 return indirect_synthetic_pointer (sect_off, 0, per_cu, per_objfile, frame,
693 type, true);
696 /* Return the type used for DWARF operations where the type is
697 unspecified in the DWARF spec. Only certain sizes are
698 supported. */
700 struct type *
701 dwarf_expr_context::address_type () const
703 gdbarch *arch = this->m_per_objfile->objfile->arch ();
704 dwarf_gdbarch_types *types
705 = (dwarf_gdbarch_types *) gdbarch_data (arch, dwarf_arch_cookie);
706 int ndx;
708 if (this->m_addr_size == 2)
709 ndx = 0;
710 else if (this->m_addr_size == 4)
711 ndx = 1;
712 else if (this->m_addr_size == 8)
713 ndx = 2;
714 else
715 error (_("Unsupported address size in DWARF expressions: %d bits"),
716 8 * this->m_addr_size);
718 if (types->dw_types[ndx] == NULL)
719 types->dw_types[ndx]
720 = arch_integer_type (arch, 8 * this->m_addr_size,
721 0, "<signed DWARF address type>");
723 return types->dw_types[ndx];
726 /* Create a new context for the expression evaluator. */
728 dwarf_expr_context::dwarf_expr_context (dwarf2_per_objfile *per_objfile,
729 int addr_size)
730 : m_addr_size (addr_size),
731 m_per_objfile (per_objfile)
735 /* Push VALUE onto the stack. */
737 void
738 dwarf_expr_context::push (struct value *value, bool in_stack_memory)
740 this->m_stack.emplace_back (value, in_stack_memory);
743 /* Push VALUE onto the stack. */
745 void
746 dwarf_expr_context::push_address (CORE_ADDR value, bool in_stack_memory)
748 push (value_from_ulongest (address_type (), value), in_stack_memory);
751 /* Pop the top item off of the stack. */
753 void
754 dwarf_expr_context::pop ()
756 if (this->m_stack.empty ())
757 error (_("dwarf expression stack underflow"));
759 this->m_stack.pop_back ();
762 /* Retrieve the N'th item on the stack. */
764 struct value *
765 dwarf_expr_context::fetch (int n)
767 if (this->m_stack.size () <= n)
768 error (_("Asked for position %d of stack, "
769 "stack only has %zu elements on it."),
770 n, this->m_stack.size ());
771 return this->m_stack[this->m_stack.size () - (1 + n)].value;
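/* Illustrative example (editorial note): after pushing the values 2, 7
   and 40 in that order, fetch (0) returns 40 (the most recently pushed
   entry), fetch (2) returns 2, and fetch (3) reports the stack-depth
   error above.  */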
774 /* See expr.h. */
776 void
777 dwarf_expr_context::get_frame_base (const gdb_byte **start,
778 size_t * length)
780 ensure_have_frame (this->m_frame, "DW_OP_fbreg");
782 const block *bl = get_frame_block (this->m_frame, NULL);
784 if (bl == NULL)
785 error (_("frame address is not available."));
787 /* Use block_linkage_function, which returns a real (not inlined)
788 function, instead of get_frame_function, which may return an
789 inlined function. */
790 symbol *framefunc = block_linkage_function (bl);
792 /* If we found a frame-relative symbol then it was certainly within
793 some function associated with a frame. If we can't find the frame,
794 something has gone wrong. */
795 gdb_assert (framefunc != NULL);
797 func_get_frame_base_dwarf_block (framefunc,
798 get_frame_address_in_block (this->m_frame),
799 start, length);
802 /* See expr.h. */
804 struct type *
805 dwarf_expr_context::get_base_type (cu_offset die_cu_off)
807 if (this->m_per_cu == nullptr)
808 return builtin_type (this->m_per_objfile->objfile->arch ())->builtin_int;
810 struct type *result = dwarf2_get_die_type (die_cu_off, this->m_per_cu,
811 this->m_per_objfile);
813 if (result == nullptr)
814 error (_("Could not find type for operation"));
816 return result;
819 /* See expr.h. */
821 void
822 dwarf_expr_context::dwarf_call (cu_offset die_cu_off)
824 ensure_have_per_cu (this->m_per_cu, "DW_OP_call");
826 frame_info *frame = this->m_frame;
828 auto get_pc_from_frame = [frame] ()
830 ensure_have_frame (frame, "DW_OP_call");
831 return get_frame_address_in_block (frame);
834 dwarf2_locexpr_baton block
835 = dwarf2_fetch_die_loc_cu_off (die_cu_off, this->m_per_cu,
836 this->m_per_objfile, get_pc_from_frame);
838 /* DW_OP_call_ref is currently not supported. */
839 gdb_assert (block.per_cu == this->m_per_cu);
841 this->eval (block.data, block.size);
844 /* See expr.h. */
846 void
847 dwarf_expr_context::read_mem (gdb_byte *buf, CORE_ADDR addr,
848 size_t length)
850 if (length == 0)
851 return;
853 /* Prefer the passed-in memory, if it exists. */
854 if (this->m_addr_info != nullptr)
856 CORE_ADDR offset = addr - this->m_addr_info->addr;
858 if (offset < this->m_addr_info->valaddr.size ()
859 && offset + length <= this->m_addr_info->valaddr.size ())
861 memcpy (buf, this->m_addr_info->valaddr.data (), length);
862 return;
866 read_memory (addr, buf, length);
869 /* See expr.h. */
871 void
872 dwarf_expr_context::push_dwarf_reg_entry_value (call_site_parameter_kind kind,
873 call_site_parameter_u kind_u,
874 int deref_size)
876 ensure_have_per_cu (this->m_per_cu, "DW_OP_entry_value");
877 ensure_have_frame (this->m_frame, "DW_OP_entry_value");
879 dwarf2_per_cu_data *caller_per_cu;
880 dwarf2_per_objfile *caller_per_objfile;
881 frame_info *caller_frame = get_prev_frame (this->m_frame);
882 call_site_parameter *parameter
883 = dwarf_expr_reg_to_entry_parameter (this->m_frame, kind, kind_u,
884 &caller_per_cu,
885 &caller_per_objfile);
886 const gdb_byte *data_src
887 = deref_size == -1 ? parameter->value : parameter->data_value;
888 size_t size
889 = deref_size == -1 ? parameter->value_size : parameter->data_value_size;
891 /* The DEREF_SIZE is not verified here. */
892 if (data_src == nullptr)
893 throw_error (NO_ENTRY_VALUE_ERROR,
894 _("Cannot resolve DW_AT_call_data_value"));
896 /* We are about to evaluate an expression in the context of the caller
897 of the current frame. This evaluation context may be different from
898 the current (callee's) context, so temporarily set the caller's context.
900 It is possible for the caller to be from a different objfile from the
901 callee if the call is made through a function pointer. */
902 scoped_restore save_frame = make_scoped_restore (&this->m_frame,
903 caller_frame);
904 scoped_restore save_per_cu = make_scoped_restore (&this->m_per_cu,
905 caller_per_cu);
906 scoped_restore save_addr_info = make_scoped_restore (&this->m_addr_info,
907 nullptr);
908 scoped_restore save_per_objfile = make_scoped_restore (&this->m_per_objfile,
909 caller_per_objfile);
911 scoped_restore save_addr_size = make_scoped_restore (&this->m_addr_size);
912 this->m_addr_size = this->m_per_cu->addr_size ();
914 this->eval (data_src, size);
917 /* See expr.h. */
919 value *
920 dwarf_expr_context::fetch_result (struct type *type, struct type *subobj_type,
921 LONGEST subobj_offset, bool as_lval)
923 value *retval = nullptr;
924 gdbarch *arch = this->m_per_objfile->objfile->arch ();
926 if (type == nullptr)
927 type = address_type ();
929 if (subobj_type == nullptr)
930 subobj_type = type;
932 if (this->m_pieces.size () > 0)
934 ULONGEST bit_size = 0;
936 for (dwarf_expr_piece &piece : this->m_pieces)
937 bit_size += piece.size;
938 /* Complain if the expression is larger than the size of the
939 outer type. */
940 if (bit_size > 8 * TYPE_LENGTH (type))
941 invalid_synthetic_pointer ();
943 piece_closure *c
944 = allocate_piece_closure (this->m_per_cu, this->m_per_objfile,
945 std::move (this->m_pieces), this->m_frame);
946 retval = allocate_computed_value (subobj_type,
947 &pieced_value_funcs, c);
948 set_value_offset (retval, subobj_offset);
950 else
952 /* If AS_LVAL is false, the implicit conversion from a location
953 description to a value is expected. */
954 if (!as_lval)
955 this->m_location = DWARF_VALUE_STACK;
957 switch (this->m_location)
959 case DWARF_VALUE_REGISTER:
961 gdbarch *f_arch = get_frame_arch (this->m_frame);
962 int dwarf_regnum
963 = longest_to_int (value_as_long (this->fetch (0)));
964 int gdb_regnum = dwarf_reg_to_regnum_or_error (f_arch,
965 dwarf_regnum);
967 if (subobj_offset != 0)
968 error (_("cannot use offset on synthetic pointer to register"));
970 gdb_assert (this->m_frame != NULL);
972 retval = value_from_register (subobj_type, gdb_regnum,
973 this->m_frame);
974 if (value_optimized_out (retval))
976 /* This means the register has undefined value / was
977 not saved. As we're computing the location of some
978 variable etc. in the program, not a value for
979 inspecting a register ($pc, $sp, etc.), return a
980 generic optimized out value instead, so that we show
981 <optimized out> instead of <not saved>. */
982 value *tmp = allocate_value (subobj_type);
983 value_contents_copy (tmp, 0, retval, 0,
984 TYPE_LENGTH (subobj_type));
985 retval = tmp;
988 break;
990 case DWARF_VALUE_MEMORY:
992 struct type *ptr_type;
993 CORE_ADDR address = this->fetch_address (0);
994 bool in_stack_memory = this->fetch_in_stack_memory (0);
996 /* DW_OP_deref_size (and possibly other operations too) may
997 create a pointer instead of an address. Ideally, the
998 pointer to address conversion would be performed as part
999 of those operations, but the type of the object to
1000 which the address refers is not known at the time of
1001 the operation. Therefore, we do the conversion here
1002 since the type is readily available. */
1004 switch (subobj_type->code ())
1006 case TYPE_CODE_FUNC:
1007 case TYPE_CODE_METHOD:
1008 ptr_type = builtin_type (arch)->builtin_func_ptr;
1009 break;
1010 default:
1011 ptr_type = builtin_type (arch)->builtin_data_ptr;
1012 break;
1014 address = value_as_address (value_from_pointer (ptr_type, address));
1016 retval = value_at_lazy (subobj_type,
1017 address + subobj_offset);
1018 if (in_stack_memory)
1019 set_value_stack (retval, 1);
1021 break;
1023 case DWARF_VALUE_STACK:
1025 value *val = this->fetch (0);
1026 size_t n = TYPE_LENGTH (value_type (val));
1027 size_t len = TYPE_LENGTH (subobj_type);
1028 size_t max = TYPE_LENGTH (type);
1030 if (subobj_offset + len > max)
1031 invalid_synthetic_pointer ();
1033 retval = allocate_value (subobj_type);
1035 /* The given offset is relative to the actual object. */
1036 if (gdbarch_byte_order (arch) == BFD_ENDIAN_BIG)
1037 subobj_offset += n - max;
1039 copy (value_contents_all (val).slice (subobj_offset, len),
1040 value_contents_raw (retval));
1042 break;
1044 case DWARF_VALUE_LITERAL:
1046 size_t n = TYPE_LENGTH (subobj_type);
1048 if (subobj_offset + n > this->m_len)
1049 invalid_synthetic_pointer ();
1051 retval = allocate_value (subobj_type);
1052 bfd_byte *contents = value_contents_raw (retval).data ();
1053 memcpy (contents, this->m_data + subobj_offset, n);
1055 break;
1057 case DWARF_VALUE_OPTIMIZED_OUT:
1058 retval = allocate_optimized_out_value (subobj_type);
1059 break;
1061 /* DWARF_VALUE_IMPLICIT_POINTER was converted to a pieced
1062 operation by execute_stack_op. */
1063 case DWARF_VALUE_IMPLICIT_POINTER:
1064 /* DWARF_VALUE_OPTIMIZED_OUT can't occur in this context --
1065 it can only be encountered when making a piece. */
1066 default:
1067 internal_error (__FILE__, __LINE__, _("invalid location type"));
1071 set_value_initialized (retval, this->m_initialized);
1073 return retval;
1076 /* See expr.h. */
1078 value *
1079 dwarf_expr_context::evaluate (const gdb_byte *addr, size_t len, bool as_lval,
1080 dwarf2_per_cu_data *per_cu, frame_info *frame,
1081 const struct property_addr_info *addr_info,
1082 struct type *type, struct type *subobj_type,
1083 LONGEST subobj_offset)
1085 this->m_per_cu = per_cu;
1086 this->m_frame = frame;
1087 this->m_addr_info = addr_info;
1089 eval (addr, len);
1090 return fetch_result (type, subobj_type, subobj_offset, as_lval);
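/* Editorial usage sketch, simplified and hedged -- not a verbatim copy of
   any GDB caller.  Evaluating a small block such as
   { DW_OP_lit5, DW_OP_stack_value } could look roughly like:

     dwarf_expr_context ctx (per_objfile, per_cu->addr_size ());
     value *val = ctx.evaluate (expr_bytes, expr_len, false, per_cu, frame,
                                nullptr, nullptr, nullptr, 0);

   where the third argument is AS_LVAL and expr_bytes/expr_len,
   per_objfile, per_cu and frame are assumed to come from the surrounding
   location-expression machinery (see dwarf2/loc.c).  */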
1093 /* Require that TYPE be an integral type; throw an exception if not. */
1095 static void
1096 dwarf_require_integral (struct type *type)
1098 if (type->code () != TYPE_CODE_INT
1099 && type->code () != TYPE_CODE_CHAR
1100 && type->code () != TYPE_CODE_BOOL)
1101 error (_("integral type expected in DWARF expression"));
1104 /* Return the unsigned form of TYPE. TYPE is necessarily an integral
1105 type. */
1107 static struct type *
1108 get_unsigned_type (struct gdbarch *gdbarch, struct type *type)
1110 switch (TYPE_LENGTH (type))
1112 case 1:
1113 return builtin_type (gdbarch)->builtin_uint8;
1114 case 2:
1115 return builtin_type (gdbarch)->builtin_uint16;
1116 case 4:
1117 return builtin_type (gdbarch)->builtin_uint32;
1118 case 8:
1119 return builtin_type (gdbarch)->builtin_uint64;
1120 default:
1121 error (_("no unsigned variant found for type, while evaluating "
1122 "DWARF expression"));
1126 /* Return the signed form of TYPE. TYPE is necessarily an integral
1127 type. */
1129 static struct type *
1130 get_signed_type (struct gdbarch *gdbarch, struct type *type)
1132 switch (TYPE_LENGTH (type))
1134 case 1:
1135 return builtin_type (gdbarch)->builtin_int8;
1136 case 2:
1137 return builtin_type (gdbarch)->builtin_int16;
1138 case 4:
1139 return builtin_type (gdbarch)->builtin_int32;
1140 case 8:
1141 return builtin_type (gdbarch)->builtin_int64;
1142 default:
1143 error (_("no signed variant found for type, while evaluating "
1144 "DWARF expression"));
1148 /* Retrieve the N'th item on the stack, converted to an address. */
1150 CORE_ADDR
1151 dwarf_expr_context::fetch_address (int n)
1153 gdbarch *arch = this->m_per_objfile->objfile->arch ();
1154 value *result_val = fetch (n);
1155 bfd_endian byte_order = gdbarch_byte_order (arch);
1156 ULONGEST result;
1158 dwarf_require_integral (value_type (result_val));
1159 result = extract_unsigned_integer (value_contents (result_val), byte_order);
1161 /* For most architectures, calling extract_unsigned_integer() alone
1162 is sufficient for extracting an address. However, some
1163 architectures (e.g. MIPS) use signed addresses and using
1164 extract_unsigned_integer() will not produce a correct
1165 result. Make sure we invoke gdbarch_integer_to_address()
1166 for those architectures which require it. */
1167 if (gdbarch_integer_to_address_p (arch))
1169 gdb_byte *buf = (gdb_byte *) alloca (this->m_addr_size);
1170 type *int_type = get_unsigned_type (arch,
1171 value_type (result_val));
1173 store_unsigned_integer (buf, this->m_addr_size, byte_order, result);
1174 return gdbarch_integer_to_address (arch, int_type, buf);
1177 return (CORE_ADDR) result;
1180 /* Retrieve the in_stack_memory flag of the N'th item on the stack. */
1182 bool
1183 dwarf_expr_context::fetch_in_stack_memory (int n)
1185 if (this->m_stack.size () <= n)
1186 error (_("Asked for position %d of stack, "
1187 "stack only has %zu elements on it."),
1188 n, this->m_stack.size ());
1189 return this->m_stack[this->m_stack.size () - (1 + n)].in_stack_memory;
1192 /* Return true if the expression stack is empty. */
1194 bool
1195 dwarf_expr_context::stack_empty_p () const
1197 return m_stack.empty ();
1200 /* Add a new piece to the dwarf_expr_context's piece list. */
1201 void
1202 dwarf_expr_context::add_piece (ULONGEST size, ULONGEST offset)
1204 this->m_pieces.emplace_back ();
1205 dwarf_expr_piece &p = this->m_pieces.back ();
1207 p.location = this->m_location;
1208 p.size = size;
1209 p.offset = offset;
1211 if (p.location == DWARF_VALUE_LITERAL)
1213 p.v.literal.data = this->m_data;
1214 p.v.literal.length = this->m_len;
1216 else if (stack_empty_p ())
1218 p.location = DWARF_VALUE_OPTIMIZED_OUT;
1219 /* Also reset the context's location, for our callers. This is
1220 a somewhat strange approach, but this lets us avoid setting
1221 the location to DWARF_VALUE_MEMORY in all the individual
1222 cases in the evaluator. */
1223 this->m_location = DWARF_VALUE_OPTIMIZED_OUT;
1225 else if (p.location == DWARF_VALUE_MEMORY)
1227 p.v.mem.addr = fetch_address (0);
1228 p.v.mem.in_stack_memory = fetch_in_stack_memory (0);
1230 else if (p.location == DWARF_VALUE_IMPLICIT_POINTER)
1232 p.v.ptr.die_sect_off = (sect_offset) this->m_len;
1233 p.v.ptr.offset = value_as_long (fetch (0));
1235 else if (p.location == DWARF_VALUE_REGISTER)
1236 p.v.regno = value_as_long (fetch (0));
1237 else
1239 p.v.value = fetch (0);
1243 /* Evaluate the expression at ADDR (LEN bytes long). */
1245 void
1246 dwarf_expr_context::eval (const gdb_byte *addr, size_t len)
1248 int old_recursion_depth = this->m_recursion_depth;
1250 execute_stack_op (addr, addr + len);
1252 /* RECURSION_DEPTH becomes invalid if an exception was thrown here. */
1254 gdb_assert (this->m_recursion_depth == old_recursion_depth);
1257 /* Helper to read a uleb128 value or throw an error. */
1259 const gdb_byte *
1260 safe_read_uleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
1261 uint64_t *r)
1263 buf = gdb_read_uleb128 (buf, buf_end, r);
1264 if (buf == NULL)
1265 error (_("DWARF expression error: ran off end of buffer reading uleb128 value"));
1266 return buf;
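/* Illustrative example (editorial note): ULEB128 stores 7 payload bits
   per byte, least significant group first, with the high bit marking
   continuation.  The DWARF specification's example value 624485 is
   encoded as the bytes 0xe5 0x8e 0x26:
   0x65 + (0x0e << 7) + (0x26 << 14) == 101 + 1792 + 622592 == 624485.  */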
1269 /* Helper to read a sleb128 value or throw an error. */
1271 const gdb_byte *
1272 safe_read_sleb128 (const gdb_byte *buf, const gdb_byte *buf_end,
1273 int64_t *r)
1275 buf = gdb_read_sleb128 (buf, buf_end, r);
1276 if (buf == NULL)
1277 error (_("DWARF expression error: ran off end of buffer reading sleb128 value"));
1278 return buf;
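/* Illustrative example (editorial note): SLEB128 works like ULEB128 but
   sign-extends from bit 6 of the final byte.  The single byte 0x7f
   decodes to -1, and the pair 0x81 0x7f decodes to
   1 + (0x7f << 7) - (1 << 14) == -127.  */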
1281 const gdb_byte *
1282 safe_skip_leb128 (const gdb_byte *buf, const gdb_byte *buf_end)
1284 buf = gdb_skip_leb128 (buf, buf_end);
1285 if (buf == NULL)
1286 error (_("DWARF expression error: ran off end of buffer reading leb128 value"));
1287 return buf;
1291 /* Check that the current operator is either at the end of an
1292 expression, or that it is followed by a composition operator or by
1293 DW_OP_GNU_uninit (which should terminate the expression). */
1295 void
1296 dwarf_expr_require_composition (const gdb_byte *op_ptr, const gdb_byte *op_end,
1297 const char *op_name)
1299 if (op_ptr != op_end && *op_ptr != DW_OP_piece && *op_ptr != DW_OP_bit_piece
1300 && *op_ptr != DW_OP_GNU_uninit)
1301 error (_("DWARF-2 expression error: `%s' operations must be "
1302 "used either alone or in conjunction with DW_OP_piece "
1303 "or DW_OP_bit_piece."),
1304 op_name);
1307 /* Return true iff the types T1 and T2 are "the same". This only does
1308 checks that might reasonably be needed to compare DWARF base
1309 types. */
1311 static int
1312 base_types_equal_p (struct type *t1, struct type *t2)
1314 if (t1->code () != t2->code ())
1315 return 0;
1316 if (t1->is_unsigned () != t2->is_unsigned ())
1317 return 0;
1318 return TYPE_LENGTH (t1) == TYPE_LENGTH (t2);
1321 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_reg* return the
1322 DWARF register number. Otherwise return -1. */
1325 dwarf_block_to_dwarf_reg (const gdb_byte *buf, const gdb_byte *buf_end)
1327 uint64_t dwarf_reg;
1329 if (buf_end <= buf)
1330 return -1;
1331 if (*buf >= DW_OP_reg0 && *buf <= DW_OP_reg31)
1333 if (buf_end - buf != 1)
1334 return -1;
1335 return *buf - DW_OP_reg0;
1338 if (*buf == DW_OP_regval_type || *buf == DW_OP_GNU_regval_type)
1340 buf++;
1341 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1342 if (buf == NULL)
1343 return -1;
1344 buf = gdb_skip_leb128 (buf, buf_end);
1345 if (buf == NULL)
1346 return -1;
1348 else if (*buf == DW_OP_regx)
1350 buf++;
1351 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1352 if (buf == NULL)
1353 return -1;
1355 else
1356 return -1;
1357 if (buf != buf_end || (int) dwarf_reg != dwarf_reg)
1358 return -1;
1359 return dwarf_reg;
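/* Illustrative examples (editorial note): the block { DW_OP_reg5 } yields
   5 and { DW_OP_regx, 0x21 } yields 33, while { DW_OP_reg5, DW_OP_piece, 4 }
   and { DW_OP_breg5, 0 } both yield -1 because the block is not a single
   bare register operation.  */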
1362 /* If <BUF..BUF_END] contains DW_FORM_block* with just DW_OP_breg*(0) and
1363 DW_OP_deref* return the DWARF register number. Otherwise return -1.
1364 DEREF_SIZE_RETURN contains -1 for DW_OP_deref; otherwise it contains the
1365 size from DW_OP_deref_size. */
1368 dwarf_block_to_dwarf_reg_deref (const gdb_byte *buf, const gdb_byte *buf_end,
1369 CORE_ADDR *deref_size_return)
1371 uint64_t dwarf_reg;
1372 int64_t offset;
1374 if (buf_end <= buf)
1375 return -1;
1377 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
1379 dwarf_reg = *buf - DW_OP_breg0;
1380 buf++;
1381 if (buf >= buf_end)
1382 return -1;
1384 else if (*buf == DW_OP_bregx)
1386 buf++;
1387 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1388 if (buf == NULL)
1389 return -1;
1390 if ((int) dwarf_reg != dwarf_reg)
1391 return -1;
1393 else
1394 return -1;
1396 buf = gdb_read_sleb128 (buf, buf_end, &offset);
1397 if (buf == NULL)
1398 return -1;
1399 if (offset != 0)
1400 return -1;
1402 if (*buf == DW_OP_deref)
1404 buf++;
1405 *deref_size_return = -1;
1407 else if (*buf == DW_OP_deref_size)
1409 buf++;
1410 if (buf >= buf_end)
1411 return -1;
1412 *deref_size_return = *buf++;
1414 else
1415 return -1;
1417 if (buf != buf_end)
1418 return -1;
1420 return dwarf_reg;
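/* Illustrative examples (editorial note): { DW_OP_breg7, 0, DW_OP_deref }
   yields 7 with *DEREF_SIZE_RETURN set to -1, and
   { DW_OP_breg2, 0, DW_OP_deref_size, 4 } yields 2 with a deref size of 4.
   A block with a nonzero register offset, such as
   { DW_OP_breg3, 8, DW_OP_deref }, yields -1.  */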
1423 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_fbreg(X) fill
1424 in FB_OFFSET_RETURN with the X offset and return 1. Otherwise return 0. */
1427 dwarf_block_to_fb_offset (const gdb_byte *buf, const gdb_byte *buf_end,
1428 CORE_ADDR *fb_offset_return)
1430 int64_t fb_offset;
1432 if (buf_end <= buf)
1433 return 0;
1435 if (*buf != DW_OP_fbreg)
1436 return 0;
1437 buf++;
1439 buf = gdb_read_sleb128 (buf, buf_end, &fb_offset);
1440 if (buf == NULL)
1441 return 0;
1442 *fb_offset_return = fb_offset;
1443 if (buf != buf_end || fb_offset != (LONGEST) *fb_offset_return)
1444 return 0;
1446 return 1;
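/* Illustrative example (editorial note): for the block
   { DW_OP_fbreg, 0x70 } the SLEB128 operand 0x70 decodes to -16, so
   *FB_OFFSET_RETURN is set to -16 and the function returns 1.  Any
   trailing bytes after the offset make it return 0.  */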
1449 /* If <BUF..BUF_END] contains DW_FORM_block* with single DW_OP_bregSP(X) fill
1450 in SP_OFFSET_RETURN with the X offset and return 1. Otherwise return 0.
1451 The matched SP register number depends on GDBARCH. */
1454 dwarf_block_to_sp_offset (struct gdbarch *gdbarch, const gdb_byte *buf,
1455 const gdb_byte *buf_end, CORE_ADDR *sp_offset_return)
1457 uint64_t dwarf_reg;
1458 int64_t sp_offset;
1460 if (buf_end <= buf)
1461 return 0;
1462 if (*buf >= DW_OP_breg0 && *buf <= DW_OP_breg31)
1464 dwarf_reg = *buf - DW_OP_breg0;
1465 buf++;
1467 else
1469 if (*buf != DW_OP_bregx)
1470 return 0;
1471 buf++;
1472 buf = gdb_read_uleb128 (buf, buf_end, &dwarf_reg);
1473 if (buf == NULL)
1474 return 0;
1477 if (dwarf_reg_to_regnum (gdbarch, dwarf_reg)
1478 != gdbarch_sp_regnum (gdbarch))
1479 return 0;
1481 buf = gdb_read_sleb128 (buf, buf_end, &sp_offset);
1482 if (buf == NULL)
1483 return 0;
1484 *sp_offset_return = sp_offset;
1485 if (buf != buf_end || sp_offset != (LONGEST) *sp_offset_return)
1486 return 0;
1488 return 1;
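/* Illustrative example (editorial note): assuming an architecture whose
   DWARF register 7 maps to the stack pointer (as on x86-64), the block
   { DW_OP_breg7, 0x10 } sets *SP_OFFSET_RETURN to 16 and returns 1; the
   same block on a target with a different SP register number returns 0.  */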
1491 /* The engine for the expression evaluator. Using the context in this
1492 object, evaluate the expression between OP_PTR and OP_END. */
1494 void
1495 dwarf_expr_context::execute_stack_op (const gdb_byte *op_ptr,
1496 const gdb_byte *op_end)
1498 gdbarch *arch = this->m_per_objfile->objfile->arch ();
1499 bfd_endian byte_order = gdbarch_byte_order (arch);
1500 /* Old-style "untyped" DWARF values need special treatment in a
1501 couple of places, specifically DW_OP_mod and DW_OP_shr. We need
1502 a special type for these values so we can distinguish them from
1503 values that have an explicit type, because explicitly-typed
1504 values do not need special treatment. This special type must be
1505 different (in the `==' sense) from any base type coming from the
1506 CU. */
1507 type *address_type = this->address_type ();
1509 this->m_location = DWARF_VALUE_MEMORY;
1510 this->m_initialized = 1; /* Default is initialized. */
1512 if (this->m_recursion_depth > this->m_max_recursion_depth)
1513 error (_("DWARF-2 expression error: Loop detected (%d)."),
1514 this->m_recursion_depth);
1515 this->m_recursion_depth++;
1517 while (op_ptr < op_end)
1519 dwarf_location_atom op = (dwarf_location_atom) *op_ptr++;
1520 ULONGEST result;
1521 /* Assume the value is not in stack memory.
1522 Code that knows otherwise sets this to true.
1523 Some arithmetic on stack addresses can probably be assumed to still
1524 be a stack address, but we skip this complication for now.
1525 This is just an optimization, so it's always ok to punt
1526 and leave this as false. */
1527 bool in_stack_memory = false;
1528 uint64_t uoffset, reg;
1529 int64_t offset;
1530 value *result_val = NULL;
1532 /* The DWARF expression might have a bug causing an infinite
1533 loop. In that case, quitting is the only way out. */
1534 QUIT;
1536 switch (op)
1538 case DW_OP_lit0:
1539 case DW_OP_lit1:
1540 case DW_OP_lit2:
1541 case DW_OP_lit3:
1542 case DW_OP_lit4:
1543 case DW_OP_lit5:
1544 case DW_OP_lit6:
1545 case DW_OP_lit7:
1546 case DW_OP_lit8:
1547 case DW_OP_lit9:
1548 case DW_OP_lit10:
1549 case DW_OP_lit11:
1550 case DW_OP_lit12:
1551 case DW_OP_lit13:
1552 case DW_OP_lit14:
1553 case DW_OP_lit15:
1554 case DW_OP_lit16:
1555 case DW_OP_lit17:
1556 case DW_OP_lit18:
1557 case DW_OP_lit19:
1558 case DW_OP_lit20:
1559 case DW_OP_lit21:
1560 case DW_OP_lit22:
1561 case DW_OP_lit23:
1562 case DW_OP_lit24:
1563 case DW_OP_lit25:
1564 case DW_OP_lit26:
1565 case DW_OP_lit27:
1566 case DW_OP_lit28:
1567 case DW_OP_lit29:
1568 case DW_OP_lit30:
1569 case DW_OP_lit31:
1570 result = op - DW_OP_lit0;
1571 result_val = value_from_ulongest (address_type, result);
1572 break;
1574 case DW_OP_addr:
1575 result = extract_unsigned_integer (op_ptr,
1576 this->m_addr_size, byte_order);
1577 op_ptr += this->m_addr_size;
1578 /* Some versions of GCC emit DW_OP_addr before
1579 DW_OP_GNU_push_tls_address. In this case the value is an
1580 index, not an address. We don't support things like
1581 branching between the address and the TLS op. */
1582 if (op_ptr >= op_end || *op_ptr != DW_OP_GNU_push_tls_address)
1583 result += this->m_per_objfile->objfile->text_section_offset ();
1584 result_val = value_from_ulongest (address_type, result);
1585 break;
1587 case DW_OP_addrx:
1588 case DW_OP_GNU_addr_index:
1589 ensure_have_per_cu (this->m_per_cu, "DW_OP_addrx");
1591 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1592 result = dwarf2_read_addr_index (this->m_per_cu, this->m_per_objfile,
1593 uoffset);
1594 result += this->m_per_objfile->objfile->text_section_offset ();
1595 result_val = value_from_ulongest (address_type, result);
1596 break;
1597 case DW_OP_GNU_const_index:
1598 ensure_have_per_cu (this->m_per_cu, "DW_OP_GNU_const_index");
1600 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1601 result = dwarf2_read_addr_index (this->m_per_cu, this->m_per_objfile,
1602 uoffset);
1603 result_val = value_from_ulongest (address_type, result);
1604 break;
1606 case DW_OP_const1u:
1607 result = extract_unsigned_integer (op_ptr, 1, byte_order);
1608 result_val = value_from_ulongest (address_type, result);
1609 op_ptr += 1;
1610 break;
1611 case DW_OP_const1s:
1612 result = extract_signed_integer (op_ptr, 1, byte_order);
1613 result_val = value_from_ulongest (address_type, result);
1614 op_ptr += 1;
1615 break;
1616 case DW_OP_const2u:
1617 result = extract_unsigned_integer (op_ptr, 2, byte_order);
1618 result_val = value_from_ulongest (address_type, result);
1619 op_ptr += 2;
1620 break;
1621 case DW_OP_const2s:
1622 result = extract_signed_integer (op_ptr, 2, byte_order);
1623 result_val = value_from_ulongest (address_type, result);
1624 op_ptr += 2;
1625 break;
1626 case DW_OP_const4u:
1627 result = extract_unsigned_integer (op_ptr, 4, byte_order);
1628 result_val = value_from_ulongest (address_type, result);
1629 op_ptr += 4;
1630 break;
1631 case DW_OP_const4s:
1632 result = extract_signed_integer (op_ptr, 4, byte_order);
1633 result_val = value_from_ulongest (address_type, result);
1634 op_ptr += 4;
1635 break;
1636 case DW_OP_const8u:
1637 result = extract_unsigned_integer (op_ptr, 8, byte_order);
1638 result_val = value_from_ulongest (address_type, result);
1639 op_ptr += 8;
1640 break;
1641 case DW_OP_const8s:
1642 result = extract_signed_integer (op_ptr, 8, byte_order);
1643 result_val = value_from_ulongest (address_type, result);
1644 op_ptr += 8;
1645 break;
1646 case DW_OP_constu:
1647 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1648 result = uoffset;
1649 result_val = value_from_ulongest (address_type, result);
1650 break;
1651 case DW_OP_consts:
1652 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1653 result = offset;
1654 result_val = value_from_ulongest (address_type, result);
1655 break;
1657 /* The DW_OP_reg operations are required to occur alone in
1658 location expressions. */
1659 case DW_OP_reg0:
1660 case DW_OP_reg1:
1661 case DW_OP_reg2:
1662 case DW_OP_reg3:
1663 case DW_OP_reg4:
1664 case DW_OP_reg5:
1665 case DW_OP_reg6:
1666 case DW_OP_reg7:
1667 case DW_OP_reg8:
1668 case DW_OP_reg9:
1669 case DW_OP_reg10:
1670 case DW_OP_reg11:
1671 case DW_OP_reg12:
1672 case DW_OP_reg13:
1673 case DW_OP_reg14:
1674 case DW_OP_reg15:
1675 case DW_OP_reg16:
1676 case DW_OP_reg17:
1677 case DW_OP_reg18:
1678 case DW_OP_reg19:
1679 case DW_OP_reg20:
1680 case DW_OP_reg21:
1681 case DW_OP_reg22:
1682 case DW_OP_reg23:
1683 case DW_OP_reg24:
1684 case DW_OP_reg25:
1685 case DW_OP_reg26:
1686 case DW_OP_reg27:
1687 case DW_OP_reg28:
1688 case DW_OP_reg29:
1689 case DW_OP_reg30:
1690 case DW_OP_reg31:
1691 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_reg");
1693 result = op - DW_OP_reg0;
1694 result_val = value_from_ulongest (address_type, result);
1695 this->m_location = DWARF_VALUE_REGISTER;
1696 break;
1698 case DW_OP_regx:
1699 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1700 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_regx");
1702 result = reg;
1703 result_val = value_from_ulongest (address_type, result);
1704 this->m_location = DWARF_VALUE_REGISTER;
1705 break;
1707 case DW_OP_implicit_value:
1709 uint64_t len;
1711 op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
1712 if (op_ptr + len > op_end)
1713 error (_("DW_OP_implicit_value: too few bytes available."));
1714 this->m_len = len;
1715 this->m_data = op_ptr;
1716 this->m_location = DWARF_VALUE_LITERAL;
1717 op_ptr += len;
1718 dwarf_expr_require_composition (op_ptr, op_end,
1719 "DW_OP_implicit_value");
1721 goto no_push;
1723 case DW_OP_stack_value:
1724 this->m_location = DWARF_VALUE_STACK;
1725 dwarf_expr_require_composition (op_ptr, op_end, "DW_OP_stack_value");
1726 goto no_push;
1728 case DW_OP_implicit_pointer:
1729 case DW_OP_GNU_implicit_pointer:
1731 int64_t len;
1732 ensure_have_per_cu (this->m_per_cu, "DW_OP_implicit_pointer");
1734 int ref_addr_size = this->m_per_cu->ref_addr_size ();
1736 /* The referred-to DIE of sect_offset kind. */
1737 this->m_len = extract_unsigned_integer (op_ptr, ref_addr_size,
1738 byte_order);
1739 op_ptr += ref_addr_size;
1741 /* The byte offset into the data. */
1742 op_ptr = safe_read_sleb128 (op_ptr, op_end, &len);
1743 result = (ULONGEST) len;
1744 result_val = value_from_ulongest (address_type, result);
1746 this->m_location = DWARF_VALUE_IMPLICIT_POINTER;
1747 dwarf_expr_require_composition (op_ptr, op_end,
1748 "DW_OP_implicit_pointer");
1750 break;
1752 case DW_OP_breg0:
1753 case DW_OP_breg1:
1754 case DW_OP_breg2:
1755 case DW_OP_breg3:
1756 case DW_OP_breg4:
1757 case DW_OP_breg5:
1758 case DW_OP_breg6:
1759 case DW_OP_breg7:
1760 case DW_OP_breg8:
1761 case DW_OP_breg9:
1762 case DW_OP_breg10:
1763 case DW_OP_breg11:
1764 case DW_OP_breg12:
1765 case DW_OP_breg13:
1766 case DW_OP_breg14:
1767 case DW_OP_breg15:
1768 case DW_OP_breg16:
1769 case DW_OP_breg17:
1770 case DW_OP_breg18:
1771 case DW_OP_breg19:
1772 case DW_OP_breg20:
1773 case DW_OP_breg21:
1774 case DW_OP_breg22:
1775 case DW_OP_breg23:
1776 case DW_OP_breg24:
1777 case DW_OP_breg25:
1778 case DW_OP_breg26:
1779 case DW_OP_breg27:
1780 case DW_OP_breg28:
1781 case DW_OP_breg29:
1782 case DW_OP_breg30:
1783 case DW_OP_breg31:
1785 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1786 ensure_have_frame (this->m_frame, "DW_OP_breg");
1788 result = read_addr_from_reg (this->m_frame, op - DW_OP_breg0);
1789 result += offset;
1790 result_val = value_from_ulongest (address_type, result);
1792 break;
1793 case DW_OP_bregx:
1795 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1796 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1797 ensure_have_frame (this->m_frame, "DW_OP_bregx");
1799 result = read_addr_from_reg (this->m_frame, reg);
1800 result += offset;
1801 result_val = value_from_ulongest (address_type, result);
1803 break;
1804 case DW_OP_fbreg:
1806 const gdb_byte *datastart;
1807 size_t datalen;
1809 op_ptr = safe_read_sleb128 (op_ptr, op_end, &offset);
1811 /* Rather than create a whole new context, we simply
1812 back up the current stack locally and install a new empty stack,
1813 then reset it afterwards, effectively erasing whatever the
1814 recursive call put there. */
1815 std::vector<dwarf_stack_value> saved_stack = std::move (this->m_stack);
1816 this->m_stack.clear ();
1818 /* FIXME: cagney/2003-03-26: This code should be using
1819 get_frame_base_address(), and then implement a dwarf2
1820 specific this_base method. */
1821 this->get_frame_base (&datastart, &datalen);
1822 eval (datastart, datalen);
1823 if (this->m_location == DWARF_VALUE_MEMORY)
1824 result = fetch_address (0);
1825 else if (this->m_location == DWARF_VALUE_REGISTER)
1826 result
1827 = read_addr_from_reg (this->m_frame, value_as_long (fetch (0)));
1828 else
1829 error (_("Not implemented: computing frame "
1830 "base using explicit value operator"));
1831 result = result + offset;
1832 result_val = value_from_ulongest (address_type, result);
1833 in_stack_memory = true;
1835 /* Restore the content of the original stack. */
1836 this->m_stack = std::move (saved_stack);
1838 this->m_location = DWARF_VALUE_MEMORY;
1840 break;
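	  /* Illustrative example (editorial note): for DW_OP_fbreg -16 the
	     frame base expression from DW_AT_frame_base is evaluated on a
	     scratch stack; if it leaves, say, the CFA 0x7fffffffe020 as a
	     memory location, the pushed result is
	     0x7fffffffe020 - 16 = 0x7fffffffe010, flagged as
	     in_stack_memory.  */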
1842 case DW_OP_dup:
1843 result_val = fetch (0);
1844 in_stack_memory = fetch_in_stack_memory (0);
1845 break;
1847 case DW_OP_drop:
1848 pop ();
1849 goto no_push;
1851 case DW_OP_pick:
1852 offset = *op_ptr++;
1853 result_val = fetch (offset);
1854 in_stack_memory = fetch_in_stack_memory (offset);
1855 break;
1857 case DW_OP_swap:
1859 if (this->m_stack.size () < 2)
1860 error (_("Not enough elements for "
1861 "DW_OP_swap. Need 2, have %zu."),
1862 this->m_stack.size ());
1864 dwarf_stack_value &t1 = this->m_stack[this->m_stack.size () - 1];
1865 dwarf_stack_value &t2 = this->m_stack[this->m_stack.size () - 2];
1866 std::swap (t1, t2);
1867 goto no_push;
1870 case DW_OP_over:
1871 result_val = fetch (1);
1872 in_stack_memory = fetch_in_stack_memory (1);
1873 break;
1875 case DW_OP_rot:
1877 if (this->m_stack.size () < 3)
1878 error (_("Not enough elements for "
1879 "DW_OP_rot. Need 3, have %zu."),
1880 this->m_stack.size ());
1882 dwarf_stack_value temp = this->m_stack[this->m_stack.size () - 1];
1883 this->m_stack[this->m_stack.size () - 1]
1884 = this->m_stack[this->m_stack.size () - 2];
1885 this->m_stack[this->m_stack.size () - 2]
1886 = this->m_stack[this->m_stack.size () - 3];
1887 this->m_stack[this->m_stack.size () - 3] = temp;
1888 goto no_push;
1891 case DW_OP_deref:
1892 case DW_OP_deref_size:
1893 case DW_OP_deref_type:
1894 case DW_OP_GNU_deref_type:
1896 int addr_size = (op == DW_OP_deref ? this->m_addr_size : *op_ptr++);
1897 gdb_byte *buf = (gdb_byte *) alloca (addr_size);
1898 CORE_ADDR addr = fetch_address (0);
1899 struct type *type;
1901 pop ();
1903 if (op == DW_OP_deref_type || op == DW_OP_GNU_deref_type)
1905 op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
1906 cu_offset type_die_cu_off = (cu_offset) uoffset;
1907 type = get_base_type (type_die_cu_off);
1909 else
1910 type = address_type;
1912 this->read_mem (buf, addr, addr_size);
1914 /* If the size of the object read from memory is different
1915 from the type length, we need to zero-extend it. */
1916 if (TYPE_LENGTH (type) != addr_size)
1918 ULONGEST datum =
1919 extract_unsigned_integer (buf, addr_size, byte_order);
1921 buf = (gdb_byte *) alloca (TYPE_LENGTH (type));
1922 store_unsigned_integer (buf, TYPE_LENGTH (type),
1923 byte_order, datum);
1926 result_val = value_from_contents_and_address (type, buf, addr);
1927 break;
1930 case DW_OP_abs:
1931 case DW_OP_neg:
1932 case DW_OP_not:
1933 case DW_OP_plus_uconst:
1935 /* Unary operations. */
1936 result_val = fetch (0);
1937 pop ();
1939 switch (op)
1941 case DW_OP_abs:
1942 if (value_less (result_val,
1943 value_zero (value_type (result_val), not_lval)))
1944 result_val = value_neg (result_val);
1945 break;
1946 case DW_OP_neg:
1947 result_val = value_neg (result_val);
1948 break;
1949 case DW_OP_not:
1950 dwarf_require_integral (value_type (result_val));
1951 result_val = value_complement (result_val);
1952 break;
1953 case DW_OP_plus_uconst:
1954 dwarf_require_integral (value_type (result_val));
1955 result = value_as_long (result_val);
1956 op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
1957 result += reg;
1958 result_val = value_from_ulongest (address_type, result);
1959 break;
1962 break;
1964 case DW_OP_and:
1965 case DW_OP_div:
1966 case DW_OP_minus:
1967 case DW_OP_mod:
1968 case DW_OP_mul:
1969 case DW_OP_or:
1970 case DW_OP_plus:
1971 case DW_OP_shl:
1972 case DW_OP_shr:
1973 case DW_OP_shra:
1974 case DW_OP_xor:
1975 case DW_OP_le:
1976 case DW_OP_ge:
1977 case DW_OP_eq:
1978 case DW_OP_lt:
1979 case DW_OP_gt:
1980 case DW_OP_ne:
1982 /* Binary operations. */
1983 struct value *first, *second;
1985 second = fetch (0);
1986 pop ();
1988 first = fetch (0);
1989 pop ();
1991 if (! base_types_equal_p (value_type (first), value_type (second)))
1992 error (_("Incompatible types on DWARF stack"));
1994 switch (op)
1996 case DW_OP_and:
1997 dwarf_require_integral (value_type (first));
1998 dwarf_require_integral (value_type (second));
1999 result_val = value_binop (first, second, BINOP_BITWISE_AND);
2000 break;
2001 case DW_OP_div:
2002 result_val = value_binop (first, second, BINOP_DIV);
2003 break;
2004 case DW_OP_minus:
2005 result_val = value_binop (first, second, BINOP_SUB);
2006 break;
2007 case DW_OP_mod:
2009 int cast_back = 0;
2010 struct type *orig_type = value_type (first);
2012 /* We have to special-case "old-style" untyped values
2013 -- these must have mod computed using unsigned
2014 math. */
2015 if (orig_type == address_type)
2017 struct type *utype = get_unsigned_type (arch, orig_type);
2019 cast_back = 1;
2020 first = value_cast (utype, first);
2021 second = value_cast (utype, second);
2023 /* Note that value_binop doesn't handle float or
2024 decimal float here. This seems unimportant. */
2025 result_val = value_binop (first, second, BINOP_MOD);
2026 if (cast_back)
2027 result_val = value_cast (orig_type, result_val);
2029 break;
2030 case DW_OP_mul:
2031 result_val = value_binop (first, second, BINOP_MUL);
2032 break;
2033 case DW_OP_or:
2034 dwarf_require_integral (value_type (first));
2035 dwarf_require_integral (value_type (second));
2036 result_val = value_binop (first, second, BINOP_BITWISE_IOR);
2037 break;
2038 case DW_OP_plus:
2039 result_val = value_binop (first, second, BINOP_ADD);
2040 break;
2041 case DW_OP_shl:
2042 dwarf_require_integral (value_type (first));
2043 dwarf_require_integral (value_type (second));
2044 result_val = value_binop (first, second, BINOP_LSH);
2045 break;
              case DW_OP_shr:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (!value_type (first)->is_unsigned ())
                  {
                    struct type *utype
                      = get_unsigned_type (arch, value_type (first));

                    first = value_cast (utype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
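              /* DW_OP_shra is an arithmetic right shift, so make sure
                 the first operand is treated as signed before shifting.  */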
              case DW_OP_shra:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                if (value_type (first)->is_unsigned ())
                  {
                    struct type *stype
                      = get_signed_type (arch, value_type (first));

                    first = value_cast (stype, first);
                  }

                result_val = value_binop (first, second, BINOP_RSH);
                /* Make sure we wind up with the same type we started
                   with.  */
                if (value_type (result_val) != value_type (second))
                  result_val = value_cast (value_type (second), result_val);
                break;
              case DW_OP_xor:
                dwarf_require_integral (value_type (first));
                dwarf_require_integral (value_type (second));
                result_val = value_binop (first, second, BINOP_BITWISE_XOR);
                break;
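              /* The comparison operators pop both operands and push
                 1 or 0 in the generic (address-sized) type.  */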
              case DW_OP_le:
                /* A <= B is !(B < A).  */
                result = ! value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ge:
                /* A >= B is !(A < B).  */
                result = ! value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_eq:
                result = value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_lt:
                result = value_less (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_gt:
                /* A > B is B < A.  */
                result = value_less (second, first);
                result_val = value_from_ulongest (address_type, result);
                break;
              case DW_OP_ne:
                result = ! value_equal (first, second);
                result_val = value_from_ulongest (address_type, result);
                break;
              default:
                internal_error (__FILE__, __LINE__,
                                _("Can't be reached."));
              }
          }
          break;
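        /* DW_OP_call_frame_cfa pushes the Canonical Frame Address as
           computed by the call-frame-information machinery.  */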
        case DW_OP_call_frame_cfa:
          ensure_have_frame (this->m_frame, "DW_OP_call_frame_cfa");

          result = dwarf2_frame_cfa (this->m_frame);
          result_val = value_from_ulongest (address_type, result);
          in_stack_memory = true;
          break;
        case DW_OP_GNU_push_tls_address:
        case DW_OP_form_tls_address:
          /* The variable is at a constant offset in the thread-local
             storage block of the objfile, for the current thread and
             the dynamic linker module containing this expression.
             Here we return the offset from that base.  The top of the
             stack holds the offset from the beginning of the thread
             control block at which the variable is located.  Nothing
             should follow this operator, so the top of the stack is
             returned.  */
          result = value_as_long (fetch (0));
          pop ();
          result = target_translate_tls_address (this->m_per_objfile->objfile,
                                                 result);
          result_val = value_from_ulongest (address_type, result);
          break;
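        /* DW_OP_skip branches unconditionally; the signed 16-bit
           operand is relative to the first byte after the operand.  */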
        case DW_OP_skip:
          offset = extract_signed_integer (op_ptr, 2, byte_order);
          op_ptr += 2;
          op_ptr += offset;
          goto no_push;
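        /* DW_OP_bra branches by the same kind of offset, but only if
           the value popped from the top of the stack is non-zero.  */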
        case DW_OP_bra:
          {
            struct value *val;

            offset = extract_signed_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            val = fetch (0);
            dwarf_require_integral (value_type (val));
            if (value_as_long (val) != 0)
              op_ptr += offset;
            pop ();
          }
          goto no_push;
        case DW_OP_nop:
          goto no_push;
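        /* DW_OP_piece takes a size in bytes; pieces are tracked in
           bits internally, hence the factor of eight below.  */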
        case DW_OP_piece:
          {
            uint64_t size;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            add_piece (8 * size, 0);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (this->m_location != DWARF_VALUE_LITERAL
                && this->m_location != DWARF_VALUE_OPTIMIZED_OUT)
              pop ();
            this->m_location = DWARF_VALUE_MEMORY;
          }
          goto no_push;
        case DW_OP_bit_piece:
          {
            uint64_t size, uleb_offset;

            /* Record the piece.  */
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &size);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uleb_offset);
            add_piece (size, uleb_offset);

            /* Pop off the address/regnum, and reset the location
               type.  */
            if (this->m_location != DWARF_VALUE_LITERAL
                && this->m_location != DWARF_VALUE_OPTIMIZED_OUT)
              pop ();
            this->m_location = DWARF_VALUE_MEMORY;
          }
          goto no_push;
        case DW_OP_GNU_uninit:
          if (op_ptr != op_end)
            error (_("DWARF-2 expression error: DW_OP_GNU_uninit must always "
                     "be the very last op."));

          this->m_initialized = 0;
          goto no_push;
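        /* DW_OP_call2 and DW_OP_call4 evaluate the DW_AT_location of
           the DIE at the given CU-relative offset as if its expression
           were part of this one.  */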
        case DW_OP_call2:
          {
            cu_offset cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 2, byte_order);
            op_ptr += 2;
            this->dwarf_call (cu_off);
          }
          goto no_push;
        case DW_OP_call4:
          {
            cu_offset cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            this->dwarf_call (cu_off);
          }
          goto no_push;
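        /* DW_OP_GNU_variable_value pushes the value of the variable
           described by the DIE at the given section offset.  */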
        case DW_OP_GNU_variable_value:
          {
            ensure_have_per_cu (this->m_per_cu, "DW_OP_GNU_variable_value");
            int ref_addr_size = this->m_per_cu->ref_addr_size ();

            sect_offset sect_off
              = (sect_offset) extract_unsigned_integer (op_ptr,
                                                        ref_addr_size,
                                                        byte_order);
            op_ptr += ref_addr_size;
            result_val = sect_variable_value (sect_off, this->m_per_cu,
                                              this->m_per_objfile);
            result_val = value_cast (address_type, result_val);
          }
          break;
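        /* DW_OP_entry_value's block operand names a register (or a
           single dereference of a register) whose value on entry to
           the current function is pushed, recovered via call-site
           information recorded in the caller.  */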
        case DW_OP_entry_value:
        case DW_OP_GNU_entry_value:
          {
            uint64_t len;
            CORE_ADDR deref_size;
            union call_site_parameter_u kind_u;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &len);
            if (op_ptr + len > op_end)
              error (_("DW_OP_entry_value: too few bytes available."));

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg (op_ptr, op_ptr + len);
            if (kind_u.dwarf_reg != -1)
              {
                op_ptr += len;
                this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
                                                  kind_u,
                                                  -1 /* deref_size */);
                goto no_push;
              }

            kind_u.dwarf_reg = dwarf_block_to_dwarf_reg_deref (op_ptr,
                                                               op_ptr + len,
                                                               &deref_size);
            if (kind_u.dwarf_reg != -1)
              {
                if (deref_size == -1)
                  deref_size = this->m_addr_size;
                op_ptr += len;
                this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_DWARF_REG,
                                                  kind_u, deref_size);
                goto no_push;
              }

            error (_("DWARF-2 expression error: DW_OP_entry_value is "
                     "supported only for single DW_OP_reg* "
                     "or for DW_OP_breg*(0)+DW_OP_deref*"));
          }
        case DW_OP_GNU_parameter_ref:
          {
            union call_site_parameter_u kind_u;

            kind_u.param_cu_off
              = (cu_offset) extract_unsigned_integer (op_ptr, 4, byte_order);
            op_ptr += 4;
            this->push_dwarf_reg_entry_value (CALL_SITE_PARAMETER_PARAM_OFFSET,
                                              kind_u,
                                              -1 /* deref_size */);
          }
          goto no_push;
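        /* DW_OP_const_type pushes a typed constant: the type comes
           from the base type DIE at the given CU-relative offset, and
           the block operand holds the raw bytes of the value.  */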
        case DW_OP_const_type:
        case DW_OP_GNU_const_type:
          {
            int n;
            const gdb_byte *data;
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            n = *op_ptr++;
            data = op_ptr;
            op_ptr += n;

            type = get_base_type (type_die_cu_off);

            if (TYPE_LENGTH (type) != n)
              error (_("DW_OP_const_type has different sizes for type and data"));

            result_val = value_from_contents (type, data);
          }
          break;
        case DW_OP_regval_type:
        case DW_OP_GNU_regval_type:
          {
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &reg);
            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            ensure_have_frame (this->m_frame, "DW_OP_regval_type");

            struct type *type = get_base_type (type_die_cu_off);
            int regnum
              = dwarf_reg_to_regnum_or_error (get_frame_arch (this->m_frame),
                                              reg);
            result_val = value_from_register (type, regnum, this->m_frame);
          }
          break;
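        /* For both operations a type DIE offset of 0 denotes the
           generic (address-sized) type.  DW_OP_convert casts the value
           to the new type; DW_OP_reinterpret reuses the bits unchanged
           and therefore requires both types to have the same size.  */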
        case DW_OP_convert:
        case DW_OP_GNU_convert:
        case DW_OP_reinterpret:
        case DW_OP_GNU_reinterpret:
          {
            struct type *type;

            op_ptr = safe_read_uleb128 (op_ptr, op_end, &uoffset);
            cu_offset type_die_cu_off = (cu_offset) uoffset;

            if (to_underlying (type_die_cu_off) == 0)
              type = address_type;
            else
              type = get_base_type (type_die_cu_off);

            result_val = fetch (0);
            pop ();

            if (op == DW_OP_convert || op == DW_OP_GNU_convert)
              result_val = value_cast (type, result_val);
            else if (type == value_type (result_val))
              {
                /* Nothing.  */
              }
            else if (TYPE_LENGTH (type)
                     != TYPE_LENGTH (value_type (result_val)))
              error (_("DW_OP_reinterpret has wrong size"));
            else
              result_val
                = value_from_contents (type,
                                       value_contents_all (result_val).data ());
          }
          break;
        case DW_OP_push_object_address:
          /* Return the address of the object we are currently observing.  */
          if (this->m_addr_info == nullptr
              || (this->m_addr_info->valaddr.data () == nullptr
                  && this->m_addr_info->addr == 0))
            error (_("Location address is not set."));

          result_val
            = value_from_ulongest (address_type, this->m_addr_info->addr);
          break;

        default:
          error (_("Unhandled dwarf expression opcode 0x%x"), op);
        }
      /* Most things push a result value.  */
      gdb_assert (result_val != NULL);
      push (result_val, in_stack_memory);
    no_push:
      ;
    }

  /* To simplify our main caller, if the result is an implicit
     pointer, then make a pieced value.  This is ok because we can't
     have implicit pointers in contexts where pieces are invalid.  */
  if (this->m_location == DWARF_VALUE_IMPLICIT_POINTER)
    add_piece (8 * this->m_addr_size, 0);

  this->m_recursion_depth--;
  gdb_assert (this->m_recursion_depth >= 0);
}
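/* _initialize_* functions are called from GDB's generated
   initialization code; this one registers the per-architecture data
   slot used by the evaluator.  */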
void _initialize_dwarf2expr ();
void
_initialize_dwarf2expr ()
{
  dwarf_arch_cookie
    = gdbarch_data_register_post_init (dwarf_gdbarch_types_init);
}