/* Low level packing and unpacking of values for GDB, the GNU Debugger.

   Copyright (C) 1986-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "arch-utils.h"
#include "extract-store-integer.h"
#include "cli/cli-cmds.h"
#include "target-float.h"
#include "cli/cli-decode.h"
#include "extension.h"
#include "tracepoint.h"
#include "user-regs.h"
#include "completer.h"
#include "gdbsupport/selftest.h"
#include "gdbsupport/array-view.h"
#include "cli/cli-style.h"
/* Definition of a user function.  */
struct internal_function
{
  /* The name of the function.  It is a bit odd to have this in the
     function itself -- the user might use a differently-named
     convenience variable to hold the function.  */
  char *name;

  /* The handler.  */
  internal_function_fn_noside handler;

  /* User data for the handler.  */
  void *cookie;
};
/* Returns true if the ranges defined by [offset1, offset1+len1) and
   [offset2, offset2+len2) overlap.  */

static bool
ranges_overlap (LONGEST offset1, ULONGEST len1,
		LONGEST offset2, ULONGEST len2)
{
  LONGEST h, l;

  l = std::max (offset1, offset2);
  h = std::min (offset1 + len1, offset2 + len2);
  return (l < h);
}
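/* For instance (illustrative values only): [4, 8) and [6, 10) overlap,
   so ranges_overlap (4, 4, 6, 4) returns true, while [4, 8) and
   [8, 12) are merely contiguous, so ranges_overlap (4, 4, 8, 4)
   returns false.  */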
/* Returns true if RANGES contains any range that overlaps [OFFSET,
   OFFSET+LENGTH).  */

static bool
ranges_contain (const std::vector<range> &ranges, LONGEST offset,
		ULONGEST length)
{
  range what;

  what.offset = offset;
  what.length = length;

  /* We keep ranges sorted by offset and coalesce overlapping and
     contiguous ranges, so to check if a range list contains a given
     range, we can do a binary search for the position the given range
     would be inserted if we only considered the starting OFFSET of
     ranges.  We call that position I.  Since we also have LENGTH to
     care for (this is a range after all), we need to check if the
     _previous_ range overlaps the I range.  E.g.,

	 R
	 |---|
       |---|    |---|  |------| ... |--|
       0        1      2            N

       I=1

     In the case above, the binary search would return `I=1', meaning,
     this OFFSET should be inserted at position 1, and the current
     position 1 should be pushed further (and become 2).  But, `0'
     overlaps with R, so the previous range must be checked as well.

     Then we need to check if the range at I itself overlaps the
     searched range.  E.g.,

	    R
	    |---|
       |---|    |---|  |-------| ... |--|
       0        1      2             N

       I=1  */

  auto i = std::lower_bound (ranges.begin (), ranges.end (), what);

  if (i > ranges.begin ())
    {
      const struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
	return true;
    }

  if (i < ranges.end ())
    {
      const struct range &r = *i;

      if (ranges_overlap (r.offset, r.length, offset, length))
	return true;
    }

  return false;
}
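/* Illustrative example (hypothetical contents): if RANGES holds the
   coalesced ranges [0, 8) and [16, 24), then
   ranges_contain (ranges, 6, 4) returns true because [6, 10) overlaps
   [0, 8), while ranges_contain (ranges, 10, 4) returns false because
   [10, 14) touches neither range.  */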
static struct cmd_list_element *functionlist;
value::~value ()
{
  if (this->lval () == lval_computed)
    {
      const struct lval_funcs *funcs = m_location.computed.funcs;

      if (funcs->free_closure)
	funcs->free_closure (this);
    }
  else if (this->lval () == lval_xcallable)
    delete m_location.xm_worker;
}

struct gdbarch *
value::arch () const
{
  return type ()->arch ();
}
bool
value::bits_available (LONGEST offset, ULONGEST length) const
{
  gdb_assert (!m_lazy);

  /* Don't pretend we have anything available there in the history beyond
     the boundaries of the value recorded.  It's not like inferior memory
     where there is actual stuff underneath.  */
  ULONGEST val_len = TARGET_CHAR_BIT * enclosing_type ()->length ();
  return !((m_in_history
	    && (offset < 0 || offset + length > val_len))
	   || ranges_contain (m_unavailable, offset, length));
}
bool
value::bytes_available (LONGEST offset, ULONGEST length) const
{
  ULONGEST sign = (1ULL << (sizeof (ULONGEST) * 8 - 1)) / TARGET_CHAR_BIT;
  ULONGEST mask = (sign << 1) - 1;

  if (offset != ((offset & mask) ^ sign) - sign
      || length != ((length & mask) ^ sign) - sign
      || (length > 0 && (~offset & (offset + length - 1) & sign) != 0))
    error (_("Integer overflow in data location calculation"));

  return bits_available (offset * TARGET_CHAR_BIT, length * TARGET_CHAR_BIT);
}
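/* For example (illustrative): bytes_available (2, 4) simply checks
   bits_available (16, 32) on an 8-bit-byte target, after verifying
   that the byte-to-bit scaling cannot overflow.  */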
bool
value::bits_any_optimized_out (int bit_offset, int bit_length) const
{
  gdb_assert (!m_lazy);

  return ranges_contain (m_optimized_out, bit_offset, bit_length);
}
bool
value::entirely_available ()
{
  /* We can only tell whether the whole value is available when we try
     to read it.  */
  if (m_lazy)
    fetch_lazy ();

  if (m_unavailable.empty ())
    return true;
  return false;
}

bool
value::entirely_covered_by_range_vector (const std::vector<range> &ranges)
{
  /* We can only tell whether the whole value is optimized out /
     unavailable when we try to read it.  */
  if (m_lazy)
    fetch_lazy ();

  if (ranges.size () == 1)
    {
      const struct range &t = ranges[0];

      if (t.offset == 0
	  && t.length == TARGET_CHAR_BIT * enclosing_type ()->length ())
	return true;
    }

  return false;
}
/* Insert into the vector pointed to by VECTORP the bit range starting at
   OFFSET bits, and extending for the next LENGTH bits.  */

static void
insert_into_bit_range_vector (std::vector<range> *vectorp,
			      LONGEST offset, ULONGEST length)
{
  range newr;

  /* Insert the range sorted.  If there's overlap or the new range
     would be contiguous with an existing range, merge.  */

  newr.offset = offset;
  newr.length = length;

  /* Do a binary search for the position the given range would be
     inserted if we only considered the starting OFFSET of ranges.
     Call that position I.  Since we also have LENGTH to care for
     (this is a range after all), we need to check if the _previous_
     range overlaps the I range.  E.g., calling R the new range:

       #1 - overlaps with previous

	   R
	   |-...-|
	 |---|    |---|  |------| ... |--|

     In the case #1 above, the binary search would return `I=1',
     meaning, this OFFSET should be inserted at position 1, and the
     current position 1 should be pushed further (and become 2).  But,
     note that `0' overlaps with R, so we want to merge them.

     A similar consideration needs to be taken if the new range would
     be contiguous with the previous range:

       #2 - contiguous with previous

	    R
	    |-...-|
	 |--|       |---|  |------| ... |--|

     If there's no overlap with the previous range, as in:

       #3 - not overlapping and not contiguous

	      R
	      |-...-|
	 |--|        |---|  |------| ... |--|

     or if the new range has the lowest offset of all:

       #4 - R is the range with lowest offset

	 R
	 |-...-|
	      |--|  |---|  |------| ... |--|

     ... we just push the new range to I.

     All the 4 cases above need to consider that the new range may
     also overlap several of the ranges that follow, or that R may be
     contiguous with the following range, and merge.  E.g.,

       #5 - overlapping following ranges

	 R
	 |------------------------|
	 |--|  |---|  |------| ... |--|

       or, contiguous with the following range:

	    R
	    |-------|
	 |--|       |---|  |------| ... |--|  */

  auto i = std::lower_bound (vectorp->begin (), vectorp->end (), newr);
  if (i > vectorp->begin ())
    {
      struct range &bef = *(i - 1);

      if (ranges_overlap (bef.offset, bef.length, offset, length))
	{
	  /* #1 */
	  LONGEST l = std::min (bef.offset, offset);
	  LONGEST h = std::max (bef.offset + bef.length, offset + length);

	  bef.offset = l;
	  bef.length = h - l;
	  i--;
	}
      else if (offset == bef.offset + bef.length)
	{
	  /* #2 */
	  bef.length += length;
	  i--;
	}
      else
	{
	  /* #3 */
	  i = vectorp->insert (i, newr);
	}
    }
  else
    {
      /* #4 */
      i = vectorp->insert (i, newr);
    }

  /* Check whether the ranges following the one we've just added or
     touched can be folded in (#5 above).  */
  if (i != vectorp->end () && i + 1 < vectorp->end ())
    {
      int removed = 0;
      auto next = i + 1;

      /* Get the range we just touched.  */
      struct range &t = *i;

      i = next;
      for (; i < vectorp->end (); i++)
	{
	  struct range &r = *i;
	  if (r.offset <= t.offset + t.length)
	    {
	      LONGEST l, h;

	      l = std::min (t.offset, r.offset);
	      h = std::max (t.offset + t.length, r.offset + r.length);
	      t.offset = l;
	      t.length = h - l;

	      removed++;
	    }
	  else
	    {
	      /* If we couldn't merge this one, we won't be able to
		 merge following ones either, since the ranges are
		 always sorted by OFFSET.  */
	      break;
	    }
	}

      if (removed != 0)
	vectorp->erase (next, next + removed);
    }
}
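/* Worked example (hypothetical offsets): starting from an empty
   vector, inserting the bit range (offset 0, length 4) and then
   (offset 4, length 4) yields the single coalesced range [0, 8),
   case #2 above.  Inserting (offset 2, length 10) afterwards merges
   again, leaving just [0, 12), case #1 above.  */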
void
value::mark_bits_unavailable (LONGEST offset, ULONGEST length)
{
  insert_into_bit_range_vector (&m_unavailable, offset, length);
}

void
value::mark_bytes_unavailable (LONGEST offset, ULONGEST length)
{
  mark_bits_unavailable (offset * TARGET_CHAR_BIT,
			 length * TARGET_CHAR_BIT);
}
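/* Usage sketch (illustrative only): mark the first four bytes of a
   non-lazy value VAL as unavailable and query the result:

     val->mark_bytes_unavailable (0, 4);
     gdb_assert (!val->bytes_available (0, 4));
     gdb_assert (val->bytes_available (4, 4));

   assuming VAL's enclosing type is at least eight bytes long.  */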
/* Find the first range in RANGES that overlaps the range defined by
   OFFSET and LENGTH, starting at element POS in the RANGES vector.
   Returns the index into RANGES where such overlapping range was
   found, or -1 if none was found.  */

static int
find_first_range_overlap (const std::vector<range> *ranges, int pos,
			  LONGEST offset, LONGEST length)
{
  int i;

  for (i = pos; i < ranges->size (); i++)
    {
      const range &r = (*ranges)[i];
      if (ranges_overlap (r.offset, r.length, offset, length))
	return i;
    }

  return -1;
}
/* Compare LENGTH_BITS of memory at PTR1 + OFFSET1_BITS with the memory at
   PTR2 + OFFSET2_BITS.  Return 0 if the memory is the same, otherwise
   return non-zero.

   It must always be the case that:
     OFFSET1_BITS % TARGET_CHAR_BIT == OFFSET2_BITS % TARGET_CHAR_BIT

   It is assumed that memory can be accessed from:
     PTR + (OFFSET_BITS / TARGET_CHAR_BIT)
   to:
     PTR + ((OFFSET_BITS + LENGTH_BITS + TARGET_CHAR_BIT - 1)
	    / TARGET_CHAR_BIT)  */

static int
memcmp_with_bit_offsets (const gdb_byte *ptr1, size_t offset1_bits,
			 const gdb_byte *ptr2, size_t offset2_bits,
			 size_t length_bits)
{
  gdb_assert (offset1_bits % TARGET_CHAR_BIT
	      == offset2_bits % TARGET_CHAR_BIT);

  if (offset1_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      gdb_byte mask, b1, b2;

      /* The offset from the base pointers PTR1 and PTR2 is not a complete
	 number of bytes.  A number of bits up to either the next exact
	 byte boundary, or LENGTH_BITS (whichever is sooner) will be
	 compared.  */
      bits = TARGET_CHAR_BIT - offset1_bits % TARGET_CHAR_BIT;
      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = (1 << bits) - 1;

      if (length_bits < bits)
	{
	  mask &= ~(gdb_byte) ((1 << (bits - length_bits)) - 1);
	  bits = length_bits;
	}

      /* Now load the two bytes and mask off the bits we care about.  */
      b1 = *(ptr1 + offset1_bits / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + offset2_bits / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      /* Now update the length and offsets to take account of the bits
	 we've just compared.  */
      length_bits -= bits;
      offset1_bits += bits;
      offset2_bits += bits;
    }

  if (length_bits % TARGET_CHAR_BIT != 0)
    {
      size_t bits;
      size_t o1, o2;
      gdb_byte mask, b1, b2;

      /* The length is not an exact number of bytes.  After the previous
	 IF block the offsets are byte aligned, or the length is zero
	 (in which case this code is not reached).  Compare a number of
	 bits at the end of the region, starting from an exact byte
	 boundary.  */
      bits = length_bits % TARGET_CHAR_BIT;
      o1 = offset1_bits + length_bits - bits;
      o2 = offset2_bits + length_bits - bits;

      gdb_assert (bits < sizeof (mask) * TARGET_CHAR_BIT);
      mask = ((1 << bits) - 1) << (TARGET_CHAR_BIT - bits);

      gdb_assert (o1 % TARGET_CHAR_BIT == 0);
      gdb_assert (o2 % TARGET_CHAR_BIT == 0);

      b1 = *(ptr1 + o1 / TARGET_CHAR_BIT) & mask;
      b2 = *(ptr2 + o2 / TARGET_CHAR_BIT) & mask;

      if (b1 != b2)
	return 1;

      length_bits -= bits;
    }

  if (length_bits > 0)
    {
      /* We've now taken care of any stray "bits" at the start, or end of
	 the region to compare, the remainder can be covered with a simple
	 memcmp.  */
      gdb_assert (offset1_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (offset2_bits % TARGET_CHAR_BIT == 0);
      gdb_assert (length_bits % TARGET_CHAR_BIT == 0);

      return memcmp (ptr1 + offset1_bits / TARGET_CHAR_BIT,
		     ptr2 + offset2_bits / TARGET_CHAR_BIT,
		     length_bits / TARGET_CHAR_BIT);
    }

  /* Length is zero, regions match.  */
  return 0;
}
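/* Illustrative example (hypothetical buffers A and B, 8-bit bytes):
   memcmp_with_bit_offsets (a, 4, b, 12, 12) first compares the low
   four bits of a[0] against the low four bits of b[1], then falls
   through to a plain memcmp of a[1] against b[2].  */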
/* Helper struct for find_first_range_overlap_and_match and
   value_contents_bits_eq.  Keeps track of which slot of a given
   ranges vector we have last looked at.  */

struct ranges_and_idx
{
  /* The ranges.  */
  const std::vector<range> *ranges;

  /* The range we've last found in RANGES.  Given ranges are sorted,
     we can start the next lookup here.  */
  int idx;
};
/* Helper function for value_contents_bits_eq.  Compare LENGTH bits of
   RP1's ranges starting at OFFSET1 bits with LENGTH bits of RP2's
   ranges starting at OFFSET2 bits.  Return true if the ranges match
   and fill in *L and *H with the overlapping window relative to
   (both) OFFSET1 or OFFSET2.  */

static int
find_first_range_overlap_and_match (struct ranges_and_idx *rp1,
				    struct ranges_and_idx *rp2,
				    LONGEST offset1, LONGEST offset2,
				    ULONGEST length, ULONGEST *l, ULONGEST *h)
{
  rp1->idx = find_first_range_overlap (rp1->ranges, rp1->idx,
				       offset1, length);
  rp2->idx = find_first_range_overlap (rp2->ranges, rp2->idx,
				       offset2, length);

  if (rp1->idx == -1 && rp2->idx == -1)
    {
      *l = length;
      *h = length;
      return 1;
    }
  else if (rp1->idx == -1 || rp2->idx == -1)
    return 0;
  else
    {
      const range *r1, *r2;
      ULONGEST l1, h1;
      ULONGEST l2, h2;

      r1 = &(*rp1->ranges)[rp1->idx];
      r2 = &(*rp2->ranges)[rp2->idx];

      /* Get the unavailable windows intersected by the incoming
	 ranges.  The first and last ranges that overlap the argument
	 range may be wider than said incoming arguments ranges.  */
      l1 = std::max (offset1, r1->offset);
      h1 = std::min (offset1 + length, r1->offset + r1->length);

      l2 = std::max (offset2, r2->offset);
      h2 = std::min (offset2 + length, offset2 + r2->length);

      /* Make them relative to the respective start offsets, so we can
	 compare them for equality.  */
      l1 -= offset1;
      h1 -= offset1;

      l2 -= offset2;
      h2 -= offset2;

      /* Different ranges, no match.  */
      if (l1 != l2 || h1 != h2)
	return 0;

      *h = h1;
      *l = l1;
      return 1;
    }
}
622 /* Helper function for value_contents_eq. The only difference is that
623 this function is bit rather than byte based.
625 Compare LENGTH bits of VAL1's contents starting at OFFSET1 bits
626 with LENGTH bits of VAL2's contents starting at OFFSET2 bits.
627 Return true if the available bits match. */
630 value::contents_bits_eq (int offset1
, const struct value
*val2
, int offset2
,
633 /* Each array element corresponds to a ranges source (unavailable,
634 optimized out). '1' is for VAL1, '2' for VAL2. */
635 struct ranges_and_idx rp1
[2], rp2
[2];
637 /* See function description in value.h. */
638 gdb_assert (!m_lazy
&& !val2
->m_lazy
);
640 /* We shouldn't be trying to compare past the end of the values. */
641 gdb_assert (offset1
+ length
642 <= m_enclosing_type
->length () * TARGET_CHAR_BIT
);
643 gdb_assert (offset2
+ length
644 <= val2
->m_enclosing_type
->length () * TARGET_CHAR_BIT
);
646 memset (&rp1
, 0, sizeof (rp1
));
647 memset (&rp2
, 0, sizeof (rp2
));
648 rp1
[0].ranges
= &m_unavailable
;
649 rp2
[0].ranges
= &val2
->m_unavailable
;
650 rp1
[1].ranges
= &m_optimized_out
;
651 rp2
[1].ranges
= &val2
->m_optimized_out
;
655 ULONGEST l
= 0, h
= 0; /* init for gcc -Wall */
658 for (i
= 0; i
< 2; i
++)
660 ULONGEST l_tmp
, h_tmp
;
662 /* The contents only match equal if the invalid/unavailable
663 contents ranges match as well. */
664 if (!find_first_range_overlap_and_match (&rp1
[i
], &rp2
[i
],
665 offset1
, offset2
, length
,
669 /* We're interested in the lowest/first range found. */
670 if (i
== 0 || l_tmp
< l
)
677 /* Compare the available/valid contents. */
678 if (memcmp_with_bit_offsets (m_contents
.get (), offset1
,
679 val2
->m_contents
.get (), offset2
, l
) != 0)
693 value::contents_eq (LONGEST offset1
,
694 const struct value
*val2
, LONGEST offset2
,
695 LONGEST length
) const
697 return contents_bits_eq (offset1
* TARGET_CHAR_BIT
,
698 val2
, offset2
* TARGET_CHAR_BIT
,
699 length
* TARGET_CHAR_BIT
);
705 value::contents_eq (const struct value
*val2
) const
707 ULONGEST len1
= check_typedef (enclosing_type ())->length ();
708 ULONGEST len2
= check_typedef (val2
->enclosing_type ())->length ();
711 return contents_eq (0, val2
, 0, len1
);
/* The value-history records all the values printed by print commands
   during this session.  */

static std::vector<value_ref_ptr> value_history;

/* List of all value objects currently allocated
   (except for those released by calls to release_value).
   This is so they can be freed after each command.  */

static std::vector<value_ref_ptr> all_values;
729 value::allocate_lazy (struct type
*type
)
733 /* Call check_typedef on our type to make sure that, if TYPE
734 is a TYPE_CODE_TYPEDEF, its length is set to the length
735 of the target type instead of zero. However, we do not
736 replace the typedef type by the target type, because we want
737 to keep the typedef in order to be able to set the VAL's type
738 description correctly. */
739 check_typedef (type
);
741 val
= new struct value (type
);
743 /* Values start out on the all_values chain. */
744 all_values
.emplace_back (val
);
/* The maximum size, in bytes, that GDB will try to allocate for a value.
   The initial value of 64k was not selected for any specific reason, it is
   just a reasonable starting point.  */

static int max_value_size = 65536; /* 64k bytes */

/* It is critical that the MAX_VALUE_SIZE is at least as big as the size of
   LONGEST, otherwise GDB will not be able to parse integer values from the
   CLI; for example if the MAX_VALUE_SIZE could be set to 1 then GDB would
   be unable to parse "set max-value-size 2".

   As we want a consistent GDB experience across hosts with different sizes
   of LONGEST, this arbitrary minimum value was selected; so long as it is
   at least as big as LONGEST on all hosts GDB supports, we're fine.  */

#define MIN_VALUE_FOR_MAX_VALUE_SIZE 16
static_assert (sizeof (LONGEST) <= MIN_VALUE_FOR_MAX_VALUE_SIZE);
/* Implement the "set max-value-size" command.  */

static void
set_max_value_size (const char *args, int from_tty,
		    struct cmd_list_element *c)
{
  gdb_assert (max_value_size == -1 || max_value_size >= 0);

  if (max_value_size > -1 && max_value_size < MIN_VALUE_FOR_MAX_VALUE_SIZE)
    {
      max_value_size = MIN_VALUE_FOR_MAX_VALUE_SIZE;
      error (_("max-value-size set too low, increasing to %d bytes"),
	     max_value_size);
    }
}
/* Implement the "show max-value-size" command.  */

static void
show_max_value_size (struct ui_file *file, int from_tty,
		     struct cmd_list_element *c, const char *value)
{
  if (max_value_size == -1)
    gdb_printf (file, _("Maximum value size is unlimited.\n"));
  else
    gdb_printf (file, _("Maximum value size is %d bytes.\n"),
		max_value_size);
}
796 /* Called before we attempt to allocate or reallocate a buffer for the
797 contents of a value. TYPE is the type of the value for which we are
798 allocating the buffer. If the buffer is too large (based on the user
799 controllable setting) then throw an error. If this function returns
800 then we should attempt to allocate the buffer. */
803 check_type_length_before_alloc (const struct type
*type
)
805 ULONGEST length
= type
->length ();
807 if (exceeds_max_value_size (length
))
809 if (type
->name () != NULL
)
810 error (_("value of type `%s' requires %s bytes, which is more "
811 "than max-value-size"), type
->name (), pulongest (length
));
813 error (_("value requires %s bytes, which is more than "
814 "max-value-size"), pulongest (length
));
821 exceeds_max_value_size (ULONGEST length
)
823 return max_value_size
> -1 && length
> max_value_size
;
826 /* When this has a value, it is used to limit the number of array elements
827 of an array that are loaded into memory when an array value is made
829 static std::optional
<int> array_length_limiting_element_count
;
832 scoped_array_length_limiting::scoped_array_length_limiting (int elements
)
834 m_old_value
= array_length_limiting_element_count
;
835 array_length_limiting_element_count
.emplace (elements
);
839 scoped_array_length_limiting::~scoped_array_length_limiting ()
841 array_length_limiting_element_count
= m_old_value
;
844 /* Find the inner element type for ARRAY_TYPE. */
847 find_array_element_type (struct type
*array_type
)
849 array_type
= check_typedef (array_type
);
850 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
852 if (current_language
->la_language
== language_fortran
)
853 while (array_type
->code () == TYPE_CODE_ARRAY
)
855 array_type
= array_type
->target_type ();
856 array_type
= check_typedef (array_type
);
860 array_type
= array_type
->target_type ();
861 array_type
= check_typedef (array_type
);
867 /* Return the limited length of ARRAY_TYPE, which must be of
868 TYPE_CODE_ARRAY. This function can only be called when the global
869 ARRAY_LENGTH_LIMITING_ELEMENT_COUNT has a value.
871 The limited length of an array is the smallest of either (1) the total
872 size of the array type, or (2) the array target type multiplies by the
873 array_length_limiting_element_count. */
876 calculate_limited_array_length (struct type
*array_type
)
878 gdb_assert (array_length_limiting_element_count
.has_value ());
880 array_type
= check_typedef (array_type
);
881 gdb_assert (array_type
->code () == TYPE_CODE_ARRAY
);
883 struct type
*elm_type
= find_array_element_type (array_type
);
884 ULONGEST len
= (elm_type
->length ()
885 * (*array_length_limiting_element_count
));
886 len
= std::min (len
, array_type
->length ());
894 value::set_limited_array_length ()
896 ULONGEST limit
= m_limited_length
;
897 ULONGEST len
= type ()->length ();
899 if (array_length_limiting_element_count
.has_value ())
900 len
= calculate_limited_array_length (type ());
902 if (limit
!= 0 && len
> limit
)
904 if (len
> max_value_size
)
907 m_limited_length
= max_value_size
;
914 value::allocate_contents (bool check_size
)
918 struct type
*enc_type
= enclosing_type ();
919 ULONGEST len
= enc_type
->length ();
923 /* If we are allocating the contents of an array, which
924 is greater in size than max_value_size, and there is
925 an element limit in effect, then we can possibly try
926 to load only a sub-set of the array contents into
928 if (type () == enc_type
929 && type ()->code () == TYPE_CODE_ARRAY
930 && len
> max_value_size
931 && set_limited_array_length ())
932 len
= m_limited_length
;
934 check_type_length_before_alloc (enc_type
);
937 m_contents
.reset ((gdb_byte
*) xzalloc (len
));
941 /* Allocate a value and its contents for type TYPE. If CHECK_SIZE is true,
942 then apply the usual max-value-size checks. */
945 value::allocate (struct type
*type
, bool check_size
)
947 struct value
*val
= value::allocate_lazy (type
);
949 val
->allocate_contents (check_size
);
954 /* Allocate a value and its contents for type TYPE. */
957 value::allocate (struct type
*type
)
959 return allocate (type
, true);
965 value::allocate_register_lazy (const frame_info_ptr
&initial_next_frame
,
966 int regnum
, struct type
*type
)
969 type
= register_type (frame_unwind_arch (initial_next_frame
), regnum
);
971 value
*result
= value::allocate_lazy (type
);
973 result
->set_lval (lval_register
);
974 result
->m_location
.reg
.regnum
= regnum
;
976 /* If this register value is created during unwind (while computing a frame
977 id), and NEXT_FRAME is a frame inlined in the frame being unwound, then
978 NEXT_FRAME will not have a valid frame id yet. Find the next non-inline
979 frame (possibly the sentinel frame). This is where registers are unwound
981 frame_info_ptr next_frame
= initial_next_frame
;
982 while (get_frame_type (next_frame
) == INLINE_FRAME
)
983 next_frame
= get_next_frame_sentinel_okay (next_frame
);
985 result
->m_location
.reg
.next_frame_id
= get_frame_id (next_frame
);
987 /* We should have a next frame with a valid id. */
988 gdb_assert (frame_id_p (result
->m_location
.reg
.next_frame_id
));
996 value::allocate_register (const frame_info_ptr
&next_frame
, int regnum
,
999 value
*result
= value::allocate_register_lazy (next_frame
, regnum
, type
);
1000 result
->set_lazy (false);
1004 /* Allocate a value that has the correct length
1005 for COUNT repetitions of type TYPE. */
1008 allocate_repeat_value (struct type
*type
, int count
)
1010 /* Despite the fact that we are really creating an array of TYPE here, we
1011 use the string lower bound as the array lower bound. This seems to
1012 work fine for now. */
1013 int low_bound
= current_language
->string_lower_bound ();
1014 /* FIXME-type-allocation: need a way to free this type when we are
1016 struct type
*array_type
1017 = lookup_array_range_type (type
, low_bound
, count
+ low_bound
- 1);
1019 return value::allocate (array_type
);
1023 value::allocate_computed (struct type
*type
,
1024 const struct lval_funcs
*funcs
,
1027 struct value
*v
= value::allocate_lazy (type
);
1029 v
->set_lval (lval_computed
);
1030 v
->m_location
.computed
.funcs
= funcs
;
1031 v
->m_location
.computed
.closure
= closure
;
1039 value::allocate_optimized_out (struct type
*type
)
1041 struct value
*retval
= value::allocate_lazy (type
);
1043 retval
->mark_bytes_optimized_out (0, type
->length ());
1044 retval
->set_lazy (false);
1048 /* Accessor methods. */
1050 gdb::array_view
<gdb_byte
>
1051 value::contents_raw ()
1053 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1055 allocate_contents (true);
1057 ULONGEST length
= type ()->length ();
1058 return gdb::make_array_view
1059 (m_contents
.get () + m_embedded_offset
* unit_size
, length
);
1062 gdb::array_view
<gdb_byte
>
1063 value::contents_all_raw ()
1065 allocate_contents (true);
1067 ULONGEST length
= enclosing_type ()->length ();
1068 return gdb::make_array_view (m_contents
.get (), length
);
1071 /* Look at value.h for description. */
1074 value_actual_type (struct value
*value
, int resolve_simple_types
,
1075 int *real_type_found
)
1077 struct value_print_options opts
;
1078 struct type
*result
;
1080 get_user_print_options (&opts
);
1082 if (real_type_found
)
1083 *real_type_found
= 0;
1084 result
= value
->type ();
1085 if (opts
.objectprint
)
1087 /* If result's target type is TYPE_CODE_STRUCT, proceed to
1088 fetch its rtti type. */
1089 if (result
->is_pointer_or_reference ()
1090 && (check_typedef (result
->target_type ())->code ()
1091 == TYPE_CODE_STRUCT
)
1092 && !value
->optimized_out ())
1094 struct type
*real_type
;
1096 real_type
= value_rtti_indirect_type (value
, NULL
, NULL
, NULL
);
1099 if (real_type_found
)
1100 *real_type_found
= 1;
1104 else if (resolve_simple_types
)
1106 if (real_type_found
)
1107 *real_type_found
= 1;
1108 result
= value
->enclosing_type ();
1116 error_value_optimized_out (void)
1118 throw_error (OPTIMIZED_OUT_ERROR
, _("value has been optimized out"));
1122 value::require_not_optimized_out () const
1124 if (!m_optimized_out
.empty ())
1126 if (m_lval
== lval_register
)
1127 throw_error (OPTIMIZED_OUT_ERROR
,
1128 _("register has not been saved in frame"));
1130 error_value_optimized_out ();
1135 value::require_available () const
1137 if (!m_unavailable
.empty ())
1138 throw_error (NOT_AVAILABLE_ERROR
, _("value is not available"));
1141 gdb::array_view
<const gdb_byte
>
1142 value::contents_for_printing ()
1147 ULONGEST length
= enclosing_type ()->length ();
1148 return gdb::make_array_view (m_contents
.get (), length
);
1151 gdb::array_view
<const gdb_byte
>
1152 value::contents_for_printing () const
1154 gdb_assert (!m_lazy
);
1156 ULONGEST length
= enclosing_type ()->length ();
1157 return gdb::make_array_view (m_contents
.get (), length
);
1160 gdb::array_view
<const gdb_byte
>
1161 value::contents_all ()
1163 gdb::array_view
<const gdb_byte
> result
= contents_for_printing ();
1164 require_not_optimized_out ();
1165 require_available ();
1169 /* Copy ranges in SRC_RANGE that overlap [SRC_BIT_OFFSET,
1170 SRC_BIT_OFFSET+BIT_LENGTH) ranges into *DST_RANGE, adjusted. */
1173 ranges_copy_adjusted (std::vector
<range
> *dst_range
, int dst_bit_offset
,
1174 const std::vector
<range
> &src_range
, int src_bit_offset
,
1175 unsigned int bit_length
)
1177 for (const range
&r
: src_range
)
1181 l
= std::max (r
.offset
, (LONGEST
) src_bit_offset
);
1182 h
= std::min ((LONGEST
) (r
.offset
+ r
.length
),
1183 (LONGEST
) src_bit_offset
+ bit_length
);
1186 insert_into_bit_range_vector (dst_range
,
1187 dst_bit_offset
+ (l
- src_bit_offset
),
1195 value::ranges_copy_adjusted (struct value
*dst
, int dst_bit_offset
,
1196 int src_bit_offset
, int bit_length
) const
1198 ::ranges_copy_adjusted (&dst
->m_unavailable
, dst_bit_offset
,
1199 m_unavailable
, src_bit_offset
,
1201 ::ranges_copy_adjusted (&dst
->m_optimized_out
, dst_bit_offset
,
1202 m_optimized_out
, src_bit_offset
,
1209 value::contents_copy_raw (struct value
*dst
, LONGEST dst_offset
,
1210 LONGEST src_offset
, LONGEST length
)
1212 LONGEST src_bit_offset
, dst_bit_offset
, bit_length
;
1213 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
1215 /* A lazy DST would make that this copy operation useless, since as
1216 soon as DST's contents were un-lazied (by a later value_contents
1217 call, say), the contents would be overwritten. A lazy SRC would
1218 mean we'd be copying garbage. */
1219 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1221 ULONGEST copy_length
= length
;
1222 ULONGEST limit
= m_limited_length
;
1223 if (limit
> 0 && src_offset
+ length
> limit
)
1224 copy_length
= src_offset
> limit
? 0 : limit
- src_offset
;
1226 /* The overwritten DST range gets unavailability ORed in, not
1227 replaced. Make sure to remember to implement replacing if it
1228 turns out actually necessary. */
1229 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1230 gdb_assert (!dst
->bits_any_optimized_out (TARGET_CHAR_BIT
* dst_offset
,
1231 TARGET_CHAR_BIT
* length
));
1233 if ((src_offset
+ copy_length
) * unit_size
> enclosing_type ()-> length ())
1234 error (_("access outside bounds of object"));
1236 /* Copy the data. */
1237 gdb::array_view
<gdb_byte
> dst_contents
1238 = dst
->contents_all_raw ().slice (dst_offset
* unit_size
,
1239 copy_length
* unit_size
);
1240 gdb::array_view
<const gdb_byte
> src_contents
1241 = contents_all_raw ().slice (src_offset
* unit_size
,
1242 copy_length
* unit_size
);
1243 gdb::copy (src_contents
, dst_contents
);
1245 /* Copy the meta-data, adjusted. */
1246 src_bit_offset
= src_offset
* unit_size
* HOST_CHAR_BIT
;
1247 dst_bit_offset
= dst_offset
* unit_size
* HOST_CHAR_BIT
;
1248 bit_length
= length
* unit_size
* HOST_CHAR_BIT
;
1250 ranges_copy_adjusted (dst
, dst_bit_offset
,
1251 src_bit_offset
, bit_length
);
1257 value::contents_copy_raw_bitwise (struct value
*dst
, LONGEST dst_bit_offset
,
1258 LONGEST src_bit_offset
,
1261 /* A lazy DST would make that this copy operation useless, since as
1262 soon as DST's contents were un-lazied (by a later value_contents
1263 call, say), the contents would be overwritten. A lazy SRC would
1264 mean we'd be copying garbage. */
1265 gdb_assert (!dst
->m_lazy
&& !m_lazy
);
1267 ULONGEST copy_bit_length
= bit_length
;
1268 ULONGEST bit_limit
= m_limited_length
* TARGET_CHAR_BIT
;
1269 if (bit_limit
> 0 && src_bit_offset
+ bit_length
> bit_limit
)
1270 copy_bit_length
= (src_bit_offset
> bit_limit
? 0
1271 : bit_limit
- src_bit_offset
);
1273 /* The overwritten DST range gets unavailability ORed in, not
1274 replaced. Make sure to remember to implement replacing if it
1275 turns out actually necessary. */
1276 LONGEST dst_offset
= dst_bit_offset
/ TARGET_CHAR_BIT
;
1277 LONGEST length
= bit_length
/ TARGET_CHAR_BIT
;
1278 gdb_assert (dst
->bytes_available (dst_offset
, length
));
1279 gdb_assert (!dst
->bits_any_optimized_out (dst_bit_offset
,
1282 /* Copy the data. */
1283 gdb::array_view
<gdb_byte
> dst_contents
= dst
->contents_all_raw ();
1284 gdb::array_view
<const gdb_byte
> src_contents
= contents_all_raw ();
1285 copy_bitwise (dst_contents
.data (), dst_bit_offset
,
1286 src_contents
.data (), src_bit_offset
,
1288 type_byte_order (type ()) == BFD_ENDIAN_BIG
);
1290 /* Copy the meta-data. */
1291 ranges_copy_adjusted (dst
, dst_bit_offset
, src_bit_offset
, bit_length
);
1297 value::contents_copy (struct value
*dst
, LONGEST dst_offset
,
1298 LONGEST src_offset
, LONGEST length
)
1303 contents_copy_raw (dst
, dst_offset
, src_offset
, length
);
1306 gdb::array_view
<const gdb_byte
>
1309 gdb::array_view
<const gdb_byte
> result
= contents_writeable ();
1310 require_not_optimized_out ();
1311 require_available ();
1315 gdb::array_view
<gdb_byte
>
1316 value::contents_writeable ()
1320 return contents_raw ();
1324 value::optimized_out ()
1328 /* See if we can compute the result without fetching the
1330 if (this->lval () == lval_memory
)
1332 else if (this->lval () == lval_computed
)
1334 const struct lval_funcs
*funcs
= m_location
.computed
.funcs
;
1336 if (funcs
->is_optimized_out
!= nullptr)
1337 return funcs
->is_optimized_out (this);
1340 /* Fall back to fetching. */
1345 catch (const gdb_exception_error
&ex
)
1350 case OPTIMIZED_OUT_ERROR
:
1351 case NOT_AVAILABLE_ERROR
:
1352 /* These can normally happen when we try to access an
1353 optimized out or unavailable register, either in a
1354 physical register or spilled to memory. */
1362 return !m_optimized_out
.empty ();
1365 /* Mark contents of VALUE as optimized out, starting at OFFSET bytes, and
1366 the following LENGTH bytes. */
1369 value::mark_bytes_optimized_out (int offset
, int length
)
1371 mark_bits_optimized_out (offset
* TARGET_CHAR_BIT
,
1372 length
* TARGET_CHAR_BIT
);
1378 value::mark_bits_optimized_out (LONGEST offset
, LONGEST length
)
1380 insert_into_bit_range_vector (&m_optimized_out
, offset
, length
);
1384 value::bits_synthetic_pointer (LONGEST offset
, LONGEST length
) const
1386 if (m_lval
!= lval_computed
1387 || !m_location
.computed
.funcs
->check_synthetic_pointer
)
1389 return m_location
.computed
.funcs
->check_synthetic_pointer (this, offset
,
1393 const struct lval_funcs
*
1394 value::computed_funcs () const
1396 gdb_assert (m_lval
== lval_computed
);
1398 return m_location
.computed
.funcs
;
1402 value::computed_closure () const
1404 gdb_assert (m_lval
== lval_computed
);
1406 return m_location
.computed
.closure
;
1410 value::address () const
1412 if (m_lval
!= lval_memory
)
1414 if (m_parent
!= NULL
)
1415 return m_parent
->address () + m_offset
;
1416 if (NULL
!= TYPE_DATA_LOCATION (type ()))
1418 gdb_assert (TYPE_DATA_LOCATION (type ())->is_constant ());
1419 return TYPE_DATA_LOCATION_ADDR (type ());
1422 return m_location
.address
+ m_offset
;
1426 value::raw_address () const
1428 if (m_lval
!= lval_memory
)
1430 return m_location
.address
;
1434 value::set_address (CORE_ADDR addr
)
1436 gdb_assert (m_lval
== lval_memory
);
1437 m_location
.address
= addr
;
1440 /* Return a mark in the value chain. All values allocated after the
1441 mark is obtained (except for those released) are subject to being freed
1442 if a subsequent value_free_to_mark is passed the mark. */
1446 if (all_values
.empty ())
1448 return all_values
.back ().get ();
1451 /* Release a reference to VAL, which was acquired with value_incref.
1452 This function is also called to deallocate values from the value
1458 gdb_assert (m_reference_count
> 0);
1459 m_reference_count
--;
1460 if (m_reference_count
== 0)
1464 /* Free all values allocated since MARK was obtained by value_mark
1465 (except for those released). */
1467 value_free_to_mark (const struct value
*mark
)
1469 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1470 if (iter
== all_values
.end ())
1471 all_values
.clear ();
1473 all_values
.erase (iter
+ 1, all_values
.end ());
1476 /* Remove VAL from the chain all_values
1477 so it will not be freed automatically. */
1480 release_value (struct value
*val
)
1483 return value_ref_ptr ();
1485 std::vector
<value_ref_ptr
>::reverse_iterator iter
;
1486 for (iter
= all_values
.rbegin (); iter
!= all_values
.rend (); ++iter
)
1490 value_ref_ptr result
= *iter
;
1491 all_values
.erase (iter
.base () - 1);
1496 /* We must always return an owned reference. Normally this happens
1497 because we transfer the reference from the value chain, but in
1498 this case the value was not on the chain. */
1499 return value_ref_ptr::new_reference (val
);
1504 std::vector
<value_ref_ptr
>
1505 value_release_to_mark (const struct value
*mark
)
1507 std::vector
<value_ref_ptr
> result
;
1509 auto iter
= std::find (all_values
.begin (), all_values
.end (), mark
);
1510 if (iter
== all_values
.end ())
1511 std::swap (result
, all_values
);
1514 std::move (iter
+ 1, all_values
.end (), std::back_inserter (result
));
1515 all_values
.erase (iter
+ 1, all_values
.end ());
1517 std::reverse (result
.begin (), result
.end ());
1524 value::copy () const
1526 struct type
*encl_type
= enclosing_type ();
1529 val
= value::allocate_lazy (encl_type
);
1530 val
->m_type
= m_type
;
1531 val
->set_lval (m_lval
);
1532 val
->m_location
= m_location
;
1533 val
->m_offset
= m_offset
;
1534 val
->m_bitpos
= m_bitpos
;
1535 val
->m_bitsize
= m_bitsize
;
1536 val
->m_lazy
= m_lazy
;
1537 val
->m_embedded_offset
= embedded_offset ();
1538 val
->m_pointed_to_offset
= m_pointed_to_offset
;
1539 val
->m_modifiable
= m_modifiable
;
1540 val
->m_stack
= m_stack
;
1541 val
->m_is_zero
= m_is_zero
;
1542 val
->m_in_history
= m_in_history
;
1543 val
->m_initialized
= m_initialized
;
1544 val
->m_unavailable
= m_unavailable
;
1545 val
->m_optimized_out
= m_optimized_out
;
1546 val
->m_parent
= m_parent
;
1547 val
->m_limited_length
= m_limited_length
;
1550 && !(val
->entirely_optimized_out ()
1551 || val
->entirely_unavailable ()))
1553 ULONGEST length
= val
->m_limited_length
;
1555 length
= val
->enclosing_type ()->length ();
1557 gdb_assert (m_contents
!= nullptr);
1558 const auto &arg_view
1559 = gdb::make_array_view (m_contents
.get (), length
);
1561 val
->allocate_contents (false);
1562 gdb::array_view
<gdb_byte
> val_contents
1563 = val
->contents_all_raw ().slice (0, length
);
1565 gdb::copy (arg_view
, val_contents
);
1568 if (val
->lval () == lval_computed
)
1570 const struct lval_funcs
*funcs
= val
->m_location
.computed
.funcs
;
1572 if (funcs
->copy_closure
)
1573 val
->m_location
.computed
.closure
= funcs
->copy_closure (val
);
1578 /* Return a "const" and/or "volatile" qualified version of the value V.
1579 If CNST is true, then the returned value will be qualified with
1581 if VOLTL is true, then the returned value will be qualified with
1585 make_cv_value (int cnst
, int voltl
, struct value
*v
)
1587 struct type
*val_type
= v
->type ();
1588 struct type
*m_enclosing_type
= v
->enclosing_type ();
1589 struct value
*cv_val
= v
->copy ();
1591 cv_val
->deprecated_set_type (make_cv_type (cnst
, voltl
, val_type
, NULL
));
1592 cv_val
->set_enclosing_type (make_cv_type (cnst
, voltl
, m_enclosing_type
, NULL
));
1602 if (this->lval () != not_lval
)
1604 struct type
*enc_type
= enclosing_type ();
1605 struct value
*val
= value::allocate (enc_type
);
1607 gdb::copy (contents_all (), val
->contents_all_raw ());
1608 val
->m_type
= m_type
;
1609 val
->set_embedded_offset (embedded_offset ());
1610 val
->set_pointed_to_offset (pointed_to_offset ());
1619 value::force_lval (CORE_ADDR addr
)
1621 gdb_assert (this->lval () == not_lval
);
1623 write_memory (addr
, contents_raw ().data (), type ()->length ());
1624 m_lval
= lval_memory
;
1625 m_location
.address
= addr
;
1629 value::set_component_location (const struct value
*whole
)
1633 gdb_assert (whole
->m_lval
!= lval_xcallable
);
1635 if (whole
->m_lval
== lval_internalvar
)
1636 m_lval
= lval_internalvar_component
;
1638 m_lval
= whole
->m_lval
;
1640 m_location
= whole
->m_location
;
1641 if (whole
->m_lval
== lval_computed
)
1643 const struct lval_funcs
*funcs
= whole
->m_location
.computed
.funcs
;
1645 if (funcs
->copy_closure
)
1646 m_location
.computed
.closure
= funcs
->copy_closure (whole
);
1649 /* If the WHOLE value has a dynamically resolved location property then
1650 update the address of the COMPONENT. */
1651 type
= whole
->type ();
1652 if (NULL
!= TYPE_DATA_LOCATION (type
)
1653 && TYPE_DATA_LOCATION (type
)->is_constant ())
1654 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1656 /* Similarly, if the COMPONENT value has a dynamically resolved location
1657 property then update its address. */
1658 type
= this->type ();
1659 if (NULL
!= TYPE_DATA_LOCATION (type
)
1660 && TYPE_DATA_LOCATION (type
)->is_constant ())
1662 /* If the COMPONENT has a dynamic location, and is an
1663 lval_internalvar_component, then we change it to a lval_memory.
1665 Usually a component of an internalvar is created non-lazy, and has
1666 its content immediately copied from the parent internalvar.
1667 However, for components with a dynamic location, the content of
1668 the component is not contained within the parent, but is instead
1669 accessed indirectly. Further, the component will be created as a
1672 By changing the type of the component to lval_memory we ensure
1673 that value_fetch_lazy can successfully load the component.
1675 This solution isn't ideal, but a real fix would require values to
1676 carry around both the parent value contents, and the contents of
1677 any dynamic fields within the parent. This is a substantial
1678 change to how values work in GDB. */
1679 if (this->lval () == lval_internalvar_component
)
1681 gdb_assert (lazy ());
1682 m_lval
= lval_memory
;
1685 gdb_assert (this->lval () == lval_memory
);
1686 set_address (TYPE_DATA_LOCATION_ADDR (type
));
1690 /* Access to the value history. */
1692 /* Record a new value in the value history.
1693 Returns the absolute history index of the entry. */
1696 value::record_latest ()
1698 /* We don't want this value to have anything to do with the inferior anymore.
1699 In particular, "set $1 = 50" should not affect the variable from which
1700 the value was taken, and fast watchpoints should be able to assume that
1701 a value on the value history never changes. */
1705 /* Mark the value as recorded in the history for the availability check. */
1706 m_in_history
= true;
1708 /* We preserve VALUE_LVAL so that the user can find out where it was fetched
1709 from. This is a bit dubious, because then *&$1 does not just return $1
1710 but the current contents of that location. c'est la vie... */
1711 set_modifiable (false);
1713 value_history
.push_back (release_value (this));
1715 return value_history
.size ();
1718 /* Return a copy of the value in the history with sequence number NUM. */
1721 access_value_history (int num
)
1726 absnum
+= value_history
.size ();
1731 error (_("The history is empty."));
1733 error (_("There is only one value in the history."));
1735 error (_("History does not go back to $$%d."), -num
);
1737 if (absnum
> value_history
.size ())
1738 error (_("History has not yet reached $%d."), absnum
);
1742 return value_history
[absnum
]->copy ();
1748 value_history_count ()
1750 return value_history
.size ();
1754 show_values (const char *num_exp
, int from_tty
)
1762 /* "show values +" should print from the stored position.
1763 "show values <exp>" should print around value number <exp>. */
1764 if (num_exp
[0] != '+' || num_exp
[1] != '\0')
1765 num
= parse_and_eval_long (num_exp
) - 5;
1769 /* "show values" means print the last 10 values. */
1770 num
= value_history
.size () - 9;
1776 for (i
= num
; i
< num
+ 10 && i
<= value_history
.size (); i
++)
1778 struct value_print_options opts
;
1780 val
= access_value_history (i
);
1781 gdb_printf (("$%d = "), i
);
1782 get_user_print_options (&opts
);
1783 value_print (val
, gdb_stdout
, &opts
);
1784 gdb_printf (("\n"));
1787 /* The next "show values +" should start after what we just printed. */
1790 /* Hitting just return after this command should do the same thing as
1791 "show values +". If num_exp is null, this is unnecessary, since
1792 "show values +" is not useful after "show values". */
1793 if (from_tty
&& num_exp
)
1794 set_repeat_arguments ("+");
1797 enum internalvar_kind
1799 /* The internal variable is empty. */
1802 /* The value of the internal variable is provided directly as
1803 a GDB value object. */
1806 /* A fresh value is computed via a call-back routine on every
1807 access to the internal variable. */
1808 INTERNALVAR_MAKE_VALUE
,
1810 /* The internal variable holds a GDB internal convenience function. */
1811 INTERNALVAR_FUNCTION
,
1813 /* The variable holds an integer value. */
1814 INTERNALVAR_INTEGER
,
1816 /* The variable holds a GDB-provided string. */
1820 union internalvar_data
1822 /* A value object used with INTERNALVAR_VALUE. */
1823 struct value
*value
;
1825 /* The call-back routine used with INTERNALVAR_MAKE_VALUE. */
1828 /* The functions to call. */
1829 const struct internalvar_funcs
*functions
;
1831 /* The function's user-data. */
1835 /* The internal function used with INTERNALVAR_FUNCTION. */
1838 struct internal_function
*function
;
1839 /* True if this is the canonical name for the function. */
1843 /* An integer value used with INTERNALVAR_INTEGER. */
1846 /* If type is non-NULL, it will be used as the type to generate
1847 a value for this internal variable. If type is NULL, a default
1848 integer type for the architecture is used. */
1853 /* A string value used with INTERNALVAR_STRING. */
1857 /* Internal variables. These are variables within the debugger
1858 that hold values assigned by debugger commands.
1859 The user refers to them with a '$' prefix
1860 that does not appear in the variable names stored internally. */
1864 internalvar (std::string name
)
1865 : name (std::move (name
))
1870 /* We support various different kinds of content of an internal variable.
1871 enum internalvar_kind specifies the kind, and union internalvar_data
1872 provides the data associated with this particular kind. */
1874 enum internalvar_kind kind
= INTERNALVAR_VOID
;
1876 union internalvar_data u
{};
1879 /* Use std::map, a sorted container, to make the order of iteration (and
1880 therefore the output of "show convenience") stable. */
1882 static std::map
<std::string
, internalvar
> internalvars
;
1884 /* If the variable does not already exist create it and give it the
1885 value given. If no value is given then the default is zero. */
1887 init_if_undefined_command (const char* args
, int from_tty
)
1889 struct internalvar
*intvar
= nullptr;
1891 /* Parse the expression - this is taken from set_command(). */
1892 expression_up expr
= parse_expression (args
);
1894 /* Validate the expression.
1895 Was the expression an assignment?
1896 Or even an expression at all? */
1897 if (expr
->first_opcode () != BINOP_ASSIGN
)
1898 error (_("Init-if-undefined requires an assignment expression."));
1900 /* Extract the variable from the parsed expression. */
1901 expr::assign_operation
*assign
1902 = dynamic_cast<expr::assign_operation
*> (expr
->op
.get ());
1903 if (assign
!= nullptr)
1905 expr::operation
*lhs
= assign
->get_lhs ();
1906 expr::internalvar_operation
*ivarop
1907 = dynamic_cast<expr::internalvar_operation
*> (lhs
);
1908 if (ivarop
!= nullptr)
1909 intvar
= ivarop
->get_internalvar ();
1912 if (intvar
== nullptr)
1913 error (_("The first parameter to init-if-undefined "
1914 "should be a GDB variable."));
1916 /* Only evaluate the expression if the lvalue is void.
1917 This may still fail if the expression is invalid. */
1918 if (intvar
->kind
== INTERNALVAR_VOID
)
1923 /* Look up an internal variable with name NAME. NAME should not
1924 normally include a dollar sign.
1926 If the specified internal variable does not exist,
1927 the return value is NULL. */
1929 struct internalvar
*
1930 lookup_only_internalvar (const char *name
)
1932 auto it
= internalvars
.find (name
);
1933 if (it
== internalvars
.end ())
1939 /* Complete NAME by comparing it to the names of internal
1943 complete_internalvar (completion_tracker
&tracker
, const char *name
)
1945 int len
= strlen (name
);
1947 for (auto &pair
: internalvars
)
1949 const internalvar
&var
= pair
.second
;
1951 if (var
.name
.compare (0, len
, name
) == 0)
1952 tracker
.add_completion (make_unique_xstrdup (var
.name
.c_str ()));
1956 /* Create an internal variable with name NAME and with a void value.
1957 NAME should not normally include a dollar sign.
1959 An internal variable with that name must not exist already. */
1961 struct internalvar
*
1962 create_internalvar (const char *name
)
1964 auto pair
= internalvars
.emplace (std::make_pair (name
, internalvar (name
)));
1965 gdb_assert (pair
.second
);
1967 return &pair
.first
->second
;
1970 /* Create an internal variable with name NAME and register FUN as the
1971 function that value_of_internalvar uses to create a value whenever
1972 this variable is referenced. NAME should not normally include a
1973 dollar sign. DATA is passed uninterpreted to FUN when it is
1974 called. CLEANUP, if not NULL, is called when the internal variable
1975 is destroyed. It is passed DATA as its only argument. */
1977 struct internalvar
*
1978 create_internalvar_type_lazy (const char *name
,
1979 const struct internalvar_funcs
*funcs
,
1982 struct internalvar
*var
= create_internalvar (name
);
1984 var
->kind
= INTERNALVAR_MAKE_VALUE
;
1985 var
->u
.make_value
.functions
= funcs
;
1986 var
->u
.make_value
.data
= data
;
1990 /* See documentation in value.h. */
1993 compile_internalvar_to_ax (struct internalvar
*var
,
1994 struct agent_expr
*expr
,
1995 struct axs_value
*value
)
1997 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
1998 || var
->u
.make_value
.functions
->compile_to_ax
== NULL
)
2001 var
->u
.make_value
.functions
->compile_to_ax (var
, expr
, value
,
2002 var
->u
.make_value
.data
);
2006 /* Look up an internal variable with name NAME. NAME should not
2007 normally include a dollar sign.
2009 If the specified internal variable does not exist,
2010 one is created, with a void value. */
2012 struct internalvar
*
2013 lookup_internalvar (const char *name
)
2015 struct internalvar
*var
;
2017 var
= lookup_only_internalvar (name
);
2021 return create_internalvar (name
);
2024 /* Return current value of internal variable VAR. For variables that
2025 are not inherently typed, use a value type appropriate for GDBARCH. */
2028 value_of_internalvar (struct gdbarch
*gdbarch
, struct internalvar
*var
)
2031 struct trace_state_variable
*tsv
;
2033 /* If there is a trace state variable of the same name, assume that
2034 is what we really want to see. */
2035 tsv
= find_trace_state_variable (var
->name
.c_str ());
2038 tsv
->value_known
= target_get_trace_state_variable_value (tsv
->number
,
2040 if (tsv
->value_known
)
2041 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int64
,
2044 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2050 case INTERNALVAR_VOID
:
2051 val
= value::allocate (builtin_type (gdbarch
)->builtin_void
);
2054 case INTERNALVAR_FUNCTION
:
2055 val
= value::allocate (builtin_type (gdbarch
)->internal_fn
);
2058 case INTERNALVAR_INTEGER
:
2059 if (!var
->u
.integer
.type
)
2060 val
= value_from_longest (builtin_type (gdbarch
)->builtin_int
,
2061 var
->u
.integer
.val
);
2063 val
= value_from_longest (var
->u
.integer
.type
, var
->u
.integer
.val
);
2066 case INTERNALVAR_STRING
:
2067 val
= current_language
->value_string (gdbarch
,
2069 strlen (var
->u
.string
));
2072 case INTERNALVAR_VALUE
:
2073 val
= var
->u
.value
->copy ();
2078 case INTERNALVAR_MAKE_VALUE
:
2079 val
= (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2080 var
->u
.make_value
.data
);
2084 internal_error (_("bad kind"));
2087 /* Change the VALUE_LVAL to lval_internalvar so that future operations
2088 on this value go back to affect the original internal variable.
2090 Do not do this for INTERNALVAR_MAKE_VALUE variables, as those have
2091 no underlying modifiable state in the internal variable.
2093 Likewise, if the variable's value is a computed lvalue, we want
2094 references to it to produce another computed lvalue, where
2095 references and assignments actually operate through the
2096 computed value's functions.
2098 This means that internal variables with computed values
2099 behave a little differently from other internal variables:
2100 assignments to them don't just replace the previous value
2101 altogether. At the moment, this seems like the behavior we
2104 if (var
->kind
!= INTERNALVAR_MAKE_VALUE
2105 && val
->lval () != lval_computed
)
2107 val
->set_lval (lval_internalvar
);
2108 VALUE_INTERNALVAR (val
) = var
;
2115 get_internalvar_integer (struct internalvar
*var
, LONGEST
*result
)
2117 if (var
->kind
== INTERNALVAR_INTEGER
)
2119 *result
= var
->u
.integer
.val
;
2123 if (var
->kind
== INTERNALVAR_VALUE
)
2125 struct type
*type
= check_typedef (var
->u
.value
->type ());
2127 if (type
->code () == TYPE_CODE_INT
)
2129 *result
= value_as_long (var
->u
.value
);
2134 if (var
->kind
== INTERNALVAR_MAKE_VALUE
)
2136 struct gdbarch
*gdbarch
= get_current_arch ();
2138 = (*var
->u
.make_value
.functions
->make_value
) (gdbarch
, var
,
2139 var
->u
.make_value
.data
);
2140 struct type
*type
= check_typedef (val
->type ());
2142 if (type
->code () == TYPE_CODE_INT
)
2144 *result
= value_as_long (val
);
2153 get_internalvar_function (struct internalvar
*var
,
2154 struct internal_function
**result
)
2158 case INTERNALVAR_FUNCTION
:
2159 *result
= var
->u
.fn
.function
;
2168 set_internalvar_component (struct internalvar
*var
,
2169 LONGEST offset
, LONGEST bitpos
,
2170 LONGEST bitsize
, struct value
*newval
)
2173 struct gdbarch
*gdbarch
;
2178 case INTERNALVAR_VALUE
:
2179 addr
= var
->u
.value
->contents_writeable ().data ();
2180 gdbarch
= var
->u
.value
->arch ();
2181 unit_size
= gdbarch_addressable_memory_unit_size (gdbarch
);
2184 modify_field (var
->u
.value
->type (), addr
+ offset
,
2185 value_as_long (newval
), bitpos
, bitsize
);
2187 memcpy (addr
+ offset
* unit_size
, newval
->contents ().data (),
2188 newval
->type ()->length ());
2192 /* We can never get a component of any other kind. */
2193 internal_error (_("set_internalvar_component"));
2198 set_internalvar (struct internalvar
*var
, struct value
*val
)
2200 enum internalvar_kind new_kind
;
2201 union internalvar_data new_data
= { 0 };
2203 if (var
->kind
== INTERNALVAR_FUNCTION
&& var
->u
.fn
.canonical
)
2204 error (_("Cannot overwrite convenience function %s"), var
->name
.c_str ());
2206 /* Prepare new contents. */
2207 switch (check_typedef (val
->type ())->code ())
2209 case TYPE_CODE_VOID
:
2210 new_kind
= INTERNALVAR_VOID
;
2213 case TYPE_CODE_INTERNAL_FUNCTION
:
2214 gdb_assert (val
->lval () == lval_internalvar
);
2215 new_kind
= INTERNALVAR_FUNCTION
;
2216 get_internalvar_function (VALUE_INTERNALVAR (val
),
2217 &new_data
.fn
.function
);
2218 /* Copies created here are never canonical. */
2222 new_kind
= INTERNALVAR_VALUE
;
2223 struct value
*copy
= val
->copy ();
2224 copy
->set_modifiable (true);
2226 /* Force the value to be fetched from the target now, to avoid problems
2227 later when this internalvar is referenced and the target is gone or
2230 copy
->fetch_lazy ();
2232 /* Release the value from the value chain to prevent it from being
2233 deleted by free_all_values. From here on this function should not
2234 call error () until new_data is installed into the var->u to avoid
2236 new_data
.value
= release_value (copy
).release ();
2238 /* Internal variables which are created from values with a dynamic
2239 location don't need the location property of the origin anymore.
2240 The resolved dynamic location is used prior then any other address
2241 when accessing the value.
2242 If we keep it, we would still refer to the origin value.
2243 Remove the location property in case it exist. */
2244 new_data
.value
->type ()->remove_dyn_prop (DYN_PROP_DATA_LOCATION
);
2249 /* Clean up old contents. */
2250 clear_internalvar (var
);
2253 var
->kind
= new_kind
;
2255 /* End code which must not call error(). */
2259 set_internalvar_integer (struct internalvar
*var
, LONGEST l
)
2261 /* Clean up old contents. */
2262 clear_internalvar (var
);
2264 var
->kind
= INTERNALVAR_INTEGER
;
2265 var
->u
.integer
.type
= NULL
;
2266 var
->u
.integer
.val
= l
;
2270 set_internalvar_string (struct internalvar
*var
, const char *string
)
2272 /* Clean up old contents. */
2273 clear_internalvar (var
);
2275 var
->kind
= INTERNALVAR_STRING
;
2276 var
->u
.string
= xstrdup (string
);
2280 set_internalvar_function (struct internalvar
*var
, struct internal_function
*f
)
2282 /* Clean up old contents. */
2283 clear_internalvar (var
);
2285 var
->kind
= INTERNALVAR_FUNCTION
;
2286 var
->u
.fn
.function
= f
;
2287 var
->u
.fn
.canonical
= 1;
2288 /* Variables installed here are always the canonical version. */
2292 clear_internalvar (struct internalvar
*var
)
2294 /* Clean up old contents. */
2297 case INTERNALVAR_VALUE
:
2298 var
->u
.value
->decref ();
2301 case INTERNALVAR_STRING
:
2302 xfree (var
->u
.string
);
2309 /* Reset to void kind. */
2310 var
->kind
= INTERNALVAR_VOID
;
2314 internalvar_name (const struct internalvar
*var
)
2316 return var
->name
.c_str ();

static struct internal_function *
create_internal_function (const char *name,
			  internal_function_fn_noside handler, void *cookie)
{
  struct internal_function *ifn = new (struct internal_function);

  ifn->name = xstrdup (name);
  ifn->handler = handler;
  ifn->cookie = cookie;
  return ifn;
}

const char *
value_internal_function_name (struct value *val)
{
  struct internal_function *ifn;
  bool result;

  gdb_assert (val->lval () == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (val), &ifn);
  gdb_assert (result);

  return ifn->name;
}

struct value *
call_internal_function (struct gdbarch *gdbarch,
			const struct language_defn *language,
			struct value *func, int argc, struct value **argv,
			enum noside noside)
{
  struct internal_function *ifn;
  bool result;

  gdb_assert (func->lval () == lval_internalvar);
  result = get_internalvar_function (VALUE_INTERNALVAR (func), &ifn);
  gdb_assert (result);

  return ifn->handler (gdbarch, language, ifn->cookie, argc, argv, noside);
}

/* The 'function' command.  This does nothing -- it is just a
   placeholder to let "help function NAME" work.  This is also used as
   the implementation of the sub-command that is created when
   registering an internal function.  */

static void
function_command (const char *command, int from_tty)
{
  /* Do nothing.  */
}

/* Helper function that does the work for add_internal_function.  */

static struct cmd_list_element *
do_add_internal_function (const char *name, const char *doc,
			  internal_function_fn_noside handler, void *cookie)
{
  struct internal_function *ifn;
  struct internalvar *var = lookup_internalvar (name);

  ifn = create_internal_function (name, handler, cookie);
  set_internalvar_function (var, ifn);

  return add_cmd (name, no_class, function_command, doc, &functionlist);
}

void
add_internal_function (const char *name, const char *doc,
		       internal_function_fn_noside handler, void *cookie)
{
  do_add_internal_function (name, doc, handler, cookie);
}

/* By default, internal functions are assumed to return int.  Return a value
   with that type to reflect this.  If this is not correct for a specific
   internal function, it should use an internal_function_fn_noside handler to
   bypass this default.  */

static struct value *
internal_function_default_return_type (struct gdbarch *gdbarch)
{
  return value::zero (builtin_type (gdbarch)->builtin_int, not_lval);
}
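
/* Illustrative example (hypothetical names, not from the original
   sources): a typical registration through the plain-string overload
   above would be

     add_internal_function ("_myfunc", _("Help text for $_myfunc."),
			    myfunc_handler, nullptr);

   which creates the $_myfunc convenience function and a
   "help function _myfunc" subcommand; the $_isvoid registration in
   _initialize_values below is a concrete instance of this pattern.  */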
2408 add_internal_function (const char *name
, const char *doc
,
2409 internal_function_fn handler
, void *cookie
)
2411 internal_function_fn_noside fn
2412 = [=] (struct gdbarch
*gdbarch
,
2413 const struct language_defn
*language
,
2416 struct value
**argv
,
2419 if (noside
== EVAL_AVOID_SIDE_EFFECTS
)
2420 return internal_function_default_return_type (gdbarch
);
2421 return handler (gdbarch
, language
, _cookie
, argc
, argv
);
2424 do_add_internal_function (name
, doc
, fn
, cookie
);
2430 add_internal_function (gdb::unique_xmalloc_ptr
<char> &&name
,
2431 gdb::unique_xmalloc_ptr
<char> &&doc
,
2432 internal_function_fn_noside handler
, void *cookie
)
2434 struct cmd_list_element
*cmd
2435 = do_add_internal_function (name
.get (), doc
.get (), handler
, cookie
);
2437 /* Manually transfer the ownership of the doc and name strings to CMD by
2438 setting the appropriate flags. */
2439 (void) doc
.release ();
2440 cmd
->doc_allocated
= 1;
2441 (void) name
.release ();
2442 cmd
->name_allocated
= 1;
2448 add_internal_function (gdb::unique_xmalloc_ptr
<char> &&name
,
2449 gdb::unique_xmalloc_ptr
<char> &&doc
,
2450 internal_function_fn handler
, void *cookie
)
2452 internal_function_fn_noside fn
2453 = [=] (struct gdbarch
*gdbarch
,
2454 const struct language_defn
*language
,
2457 struct value
**argv
,
2460 if (noside
== EVAL_AVOID_SIDE_EFFECTS
)
2461 return internal_function_default_return_type (gdbarch
);
2462 return handler (gdbarch
, language
, _cookie
, argc
, argv
);
2465 add_internal_function (std::forward
<gdb::unique_xmalloc_ptr
<char>>(name
),
2466 std::forward
<gdb::unique_xmalloc_ptr
<char>>(doc
),
2471 value::preserve (struct objfile
*objfile
, htab_t copied_types
)
2473 if (m_type
->objfile_owner () == objfile
)
2474 m_type
= copy_type_recursive (m_type
, copied_types
);
2476 if (m_enclosing_type
->objfile_owner () == objfile
)
2477 m_enclosing_type
= copy_type_recursive (m_enclosing_type
, copied_types
);
2480 /* Likewise for internal variable VAR. */
2483 preserve_one_internalvar (struct internalvar
*var
, struct objfile
*objfile
,
2484 htab_t copied_types
)
2488 case INTERNALVAR_INTEGER
:
2489 if (var
->u
.integer
.type
2490 && var
->u
.integer
.type
->objfile_owner () == objfile
)
2492 = copy_type_recursive (var
->u
.integer
.type
, copied_types
);
2495 case INTERNALVAR_VALUE
:
2496 var
->u
.value
->preserve (objfile
, copied_types
);
2501 /* Make sure that all types and values referenced by VAROBJ are updated before
2502 OBJFILE is discarded. COPIED_TYPES is used to prevent cycles and
2506 preserve_one_varobj (struct varobj
*varobj
, struct objfile
*objfile
,
2507 htab_t copied_types
)
2509 if (varobj
->type
->is_objfile_owned ()
2510 && varobj
->type
->objfile_owner () == objfile
)
2513 = copy_type_recursive (varobj
->type
, copied_types
);
2516 if (varobj
->value
!= nullptr)
2517 varobj
->value
->preserve (objfile
, copied_types
);
2520 /* Update the internal variables and value history when OBJFILE is
2521 discarded; we must copy the types out of the objfile. New global types
2522 will be created for every convenience variable which currently points to
2523 this objfile's types, and the convenience variables will be adjusted to
2524 use the new global types. */
2527 preserve_values (struct objfile
*objfile
)
2529 /* Create the hash table. We allocate on the objfile's obstack, since
2530 it is soon to be deleted. */
2531 htab_up copied_types
= create_copied_types_hash ();
2533 for (const value_ref_ptr
&item
: value_history
)
2534 item
->preserve (objfile
, copied_types
.get ());
2536 for (auto &pair
: internalvars
)
2537 preserve_one_internalvar (&pair
.second
, objfile
, copied_types
.get ());
2539 /* For the remaining varobj, check that none has type owned by OBJFILE. */
2540 all_root_varobjs ([&copied_types
, objfile
] (struct varobj
*varobj
)
2542 preserve_one_varobj (varobj
, objfile
,
2543 copied_types
.get ());
2546 preserve_ext_lang_values (objfile
, copied_types
.get ());
2550 show_convenience (const char *ignore
, int from_tty
)
2552 struct gdbarch
*gdbarch
= get_current_arch ();
2554 struct value_print_options opts
;
2556 get_user_print_options (&opts
);
2557 for (auto &pair
: internalvars
)
2559 internalvar
&var
= pair
.second
;
2565 gdb_printf (("$%s = "), var
.name
.c_str ());
2571 val
= value_of_internalvar (gdbarch
, &var
);
2572 value_print (val
, gdb_stdout
, &opts
);
2574 catch (const gdb_exception_error
&ex
)
2576 fprintf_styled (gdb_stdout
, metadata_style
.style (),
2577 _("<error: %s>"), ex
.what ());
2580 gdb_printf (("\n"));
      /* This text does not mention convenience functions on purpose.
	 The user can't create them except via Python, and if Python support
	 is installed this message will never be printed ($_streq will
	 exist, for example).  */
2588 gdb_printf (_("No debugger convenience variables now defined.\n"
2589 "Convenience variables have "
2590 "names starting with \"$\";\n"
2591 "use \"set\" as in \"set "
2592 "$foo = 5\" to define them.\n"));
2600 value::from_xmethod (xmethod_worker_up
&&worker
)
2604 v
= value::allocate (builtin_type (current_inferior ()->arch ())->xmethod
);
2605 v
->m_lval
= lval_xcallable
;
2606 v
->m_location
.xm_worker
= worker
.release ();
2607 v
->m_modifiable
= false;
2615 value::result_type_of_xmethod (gdb::array_view
<value
*> argv
)
2617 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2618 && m_lval
== lval_xcallable
&& !argv
.empty ());
2620 return m_location
.xm_worker
->get_result_type (argv
[0], argv
.slice (1));
2626 value::call_xmethod (gdb::array_view
<value
*> argv
)
2628 gdb_assert (type ()->code () == TYPE_CODE_XMETHOD
2629 && m_lval
== lval_xcallable
&& !argv
.empty ());
2631 return m_location
.xm_worker
->invoke (argv
[0], argv
.slice (1));
/* Extract a value as a C number (either long or double).
   Knows how to convert fixed values to double, or
   floating values to long.
   Does not deallocate the value.  */

LONGEST
value_as_long (struct value *val)
{
  /* This coerces arrays and functions, which is necessary (e.g.
     in disassemble_command).  It also dereferences references, which
     I suspect is the most logical thing to do.  */
  val = coerce_array (val);
  return unpack_long (val->type (), val->contents ().data ());
}
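
/* Illustrative note (not from the original comments): because of the
   coerce_array call above, applying value_as_long to an array value
   yields the address of its first element (the usual C array-to-pointer
   decay), which is what callers such as disassemble_command rely on.  */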
2652 value_as_mpz (struct value
*val
)
2654 val
= coerce_array (val
);
2655 struct type
*type
= check_typedef (val
->type ());
2657 switch (type
->code ())
2659 case TYPE_CODE_ENUM
:
2660 case TYPE_CODE_BOOL
:
2662 case TYPE_CODE_CHAR
:
2663 case TYPE_CODE_RANGE
:
2667 return gdb_mpz (value_as_long (val
));
2672 gdb::array_view
<const gdb_byte
> valbytes
= val
->contents ();
2673 enum bfd_endian byte_order
= type_byte_order (type
);
2675 /* Handle integers that are either not a multiple of the word size,
2676 or that are stored at some bit offset. */
2677 unsigned bit_off
= 0, bit_size
= 0;
2678 if (type
->bit_size_differs_p ())
2680 bit_size
= type
->bit_size ();
2683 /* We can just handle this immediately. */
2687 bit_off
= type
->bit_offset ();
2689 unsigned n_bytes
= ((bit_off
% 8) + bit_size
+ 7) / 8;
2690 valbytes
= valbytes
.slice (bit_off
/ 8, n_bytes
);
2692 if (byte_order
== BFD_ENDIAN_BIG
)
2693 bit_off
= (n_bytes
* 8 - bit_off
% 8 - bit_size
);
2698 result
.read (val
->contents (), byte_order
, type
->is_unsigned ());
2700 /* Shift off any low bits, if needed. */
2704 /* Mask off any high bits, if needed. */
2706 result
.mask (bit_size
);
2708 /* Now handle any range bias. */
2709 if (type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
2711 /* Unfortunately we have to box here, because LONGEST is
2712 probably wider than long. */
2713 result
+= gdb_mpz (type
->bounds ()->bias
);
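
/* Illustrative example of the bias handling just above (not from the
   original comments): for a biased range type whose bounds ()->bias is
   100, the target representation stores VALUE - 100, so a raw field
   holding 5 yields the gdb_mpz 105 once the bias is added back.  */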
2719 /* Extract a value as a C pointer. */
2722 value_as_address (struct value
*val
)
2724 struct gdbarch
*gdbarch
= val
->type ()->arch ();
2726 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2727 whether we want this to be true eventually. */
2729 /* gdbarch_addr_bits_remove is wrong if we are being called for a
2730 non-address (e.g. argument to "signal", "info break", etc.), or
2731 for pointers to char, in which the low bits *are* significant. */
2732 return gdbarch_addr_bits_remove (gdbarch
, value_as_long (val
));
2735 /* There are several targets (IA-64, PowerPC, and others) which
2736 don't represent pointers to functions as simply the address of
2737 the function's entry point. For example, on the IA-64, a
2738 function pointer points to a two-word descriptor, generated by
2739 the linker, which contains the function's entry point, and the
2740 value the IA-64 "global pointer" register should have --- to
2741 support position-independent code. The linker generates
2742 descriptors only for those functions whose addresses are taken.
2744 On such targets, it's difficult for GDB to convert an arbitrary
2745 function address into a function pointer; it has to either find
2746 an existing descriptor for that function, or call malloc and
2747 build its own. On some targets, it is impossible for GDB to
2748 build a descriptor at all: the descriptor must contain a jump
2749 instruction; data memory cannot be executed; and code memory
2752 Upon entry to this function, if VAL is a value of type `function'
2753 (that is, TYPE_CODE (val->type ()) == TYPE_CODE_FUNC), then
2754 val->address () is the address of the function. This is what
2755 you'll get if you evaluate an expression like `main'. The call
2756 to COERCE_ARRAY below actually does all the usual unary
2757 conversions, which includes converting values of type `function'
2758 to `pointer to function'. This is the challenging conversion
2759 discussed above. Then, `unpack_pointer' will convert that pointer
2760 back into an address.
2762 So, suppose the user types `disassemble foo' on an architecture
2763 with a strange function pointer representation, on which GDB
2764 cannot build its own descriptors, and suppose further that `foo'
2765 has no linker-built descriptor. The address->pointer conversion
2766 will signal an error and prevent the command from running, even
2767 though the next step would have been to convert the pointer
2768 directly back into the same address.
2770 The following shortcut avoids this whole mess. If VAL is a
2771 function, just return its address directly. */
2772 if (val
->type ()->code () == TYPE_CODE_FUNC
2773 || val
->type ()->code () == TYPE_CODE_METHOD
)
2774 return val
->address ();
2776 val
= coerce_array (val
);
2778 /* Some architectures (e.g. Harvard), map instruction and data
2779 addresses onto a single large unified address space. For
2780 instance: An architecture may consider a large integer in the
2781 range 0x10000000 .. 0x1000ffff to already represent a data
2782 addresses (hence not need a pointer to address conversion) while
2783 a small integer would still need to be converted integer to
2784 pointer to address. Just assume such architectures handle all
2785 integer conversions in a single function. */
2789 I think INTEGER_TO_ADDRESS is a good idea as proposed --- but we
2790 must admonish GDB hackers to make sure its behavior matches the
2791 compiler's, whenever possible.
2793 In general, I think GDB should evaluate expressions the same way
2794 the compiler does. When the user copies an expression out of
2795 their source code and hands it to a `print' command, they should
2796 get the same value the compiler would have computed. Any
2797 deviation from this rule can cause major confusion and annoyance,
2798 and needs to be justified carefully. In other words, GDB doesn't
2799 really have the freedom to do these conversions in clever and
2802 AndrewC pointed out that users aren't complaining about how GDB
2803 casts integers to pointers; they are complaining that they can't
2804 take an address from a disassembly listing and give it to `x/i'.
2805 This is certainly important.
2807 Adding an architecture method like integer_to_address() certainly
2808 makes it possible for GDB to "get it right" in all circumstances
2809 --- the target has complete control over how things get done, so
2810 people can Do The Right Thing for their target without breaking
2811 anyone else. The standard doesn't specify how integers get
2812 converted to pointers; usually, the ABI doesn't either, but
2813 ABI-specific code is a more reasonable place to handle it. */
2815 if (!val
->type ()->is_pointer_or_reference ()
2816 && gdbarch_integer_to_address_p (gdbarch
))
2817 return gdbarch_integer_to_address (gdbarch
, val
->type (),
2818 val
->contents ().data ());
2820 return unpack_pointer (val
->type (), val
->contents ().data ());
/* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
   as a long, or as a double, assuming the raw data is described
   by type TYPE.  Knows how to convert different sizes of values
   and can convert between fixed and floating point.  We don't assume
   any alignment for the raw data.  Return value is in host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_long() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

LONGEST
unpack_long (struct type *type, const gdb_byte *valaddr)
{
2841 if (is_fixed_point_type (type
))
2842 type
= type
->fixed_point_type_base_type ();
2844 enum bfd_endian byte_order
= type_byte_order (type
);
2845 enum type_code code
= type
->code ();
2846 int len
= type
->length ();
2847 int nosign
= type
->is_unsigned ();
2851 case TYPE_CODE_TYPEDEF
:
2852 return unpack_long (check_typedef (type
), valaddr
);
2853 case TYPE_CODE_ENUM
:
2854 case TYPE_CODE_FLAGS
:
2855 case TYPE_CODE_BOOL
:
2857 case TYPE_CODE_CHAR
:
2858 case TYPE_CODE_RANGE
:
2859 case TYPE_CODE_MEMBERPTR
:
2863 if (type
->bit_size_differs_p ())
2865 unsigned bit_off
= type
->bit_offset ();
2866 unsigned bit_size
= type
->bit_size ();
2869 /* unpack_bits_as_long doesn't handle this case the
2870 way we'd like, so handle it here. */
2874 result
= unpack_bits_as_long (type
, valaddr
, bit_off
, bit_size
);
2879 result
= extract_unsigned_integer (valaddr
, len
, byte_order
);
2881 result
= extract_signed_integer (valaddr
, len
, byte_order
);
2883 if (code
== TYPE_CODE_RANGE
)
2884 result
+= type
->bounds ()->bias
;
2889 case TYPE_CODE_DECFLOAT
:
2890 return target_float_to_longest (valaddr
, type
);
2892 case TYPE_CODE_FIXED_POINT
:
2895 vq
.read_fixed_point (gdb::make_array_view (valaddr
, len
),
2897 type
->fixed_point_scaling_factor ());
2899 gdb_mpz vz
= vq
.as_integer ();
2900 return vz
.as_integer
<LONGEST
> ();
2905 case TYPE_CODE_RVALUE_REF
:
2906 /* Assume a CORE_ADDR can fit in a LONGEST (for now). Not sure
2907 whether we want this to be true eventually. */
2908 return extract_typed_address (valaddr
, type
);
2911 error (_("Value can't be converted to integer."));
/* Unpack raw data (copied from the debuggee, target byte order) at VALADDR
   as a CORE_ADDR, assuming the raw data is described by type TYPE.
   We don't assume any alignment for the raw data.  Return value is in
   host byte order.

   If you want functions and arrays to be coerced to pointers, and
   references to be dereferenced, call value_as_address() instead.

   C++: It is assumed that the front-end has taken care of
   all matters concerning pointers to members.  A pointer
   to member which reaches here is considered to be equivalent
   to an INT (or some size).  After all, it is only an offset.  */

CORE_ADDR
unpack_pointer (struct type *type, const gdb_byte *valaddr)
{
  /* Assume a CORE_ADDR can fit in a LONGEST (for now).  Not sure
     whether we want this to be true eventually.  */
  return unpack_long (type, valaddr);
}

bool
is_floating_value (struct value *val)
{
  struct type *type = check_typedef (val->type ());

  if (is_floating_type (type))
    {
      if (!target_float_is_valid (val->contents ().data (), type))
	error (_("Invalid floating value found in program."));

      return true;
    }

  return false;
}
2952 /* Get the value of the FIELDNO'th field (which must be static) of
2956 value_static_field (struct type
*type
, int fieldno
)
2958 struct value
*retval
;
2960 switch (type
->field (fieldno
).loc_kind ())
2962 case FIELD_LOC_KIND_PHYSADDR
:
2963 retval
= value_at_lazy (type
->field (fieldno
).type (),
2964 type
->field (fieldno
).loc_physaddr ());
2966 case FIELD_LOC_KIND_PHYSNAME
:
2968 const char *phys_name
= type
->field (fieldno
).loc_physname ();
2969 /* type->field (fieldno).name (); */
2970 struct block_symbol sym
= lookup_symbol (phys_name
, nullptr,
2971 SEARCH_VAR_DOMAIN
, nullptr);
2973 if (sym
.symbol
== NULL
)
2975 /* With some compilers, e.g. HP aCC, static data members are
2976 reported as non-debuggable symbols. */
2977 bound_minimal_symbol msym
2978 = lookup_minimal_symbol (current_program_space
, phys_name
);
2979 struct type
*field_type
= type
->field (fieldno
).type ();
2982 retval
= value::allocate_optimized_out (field_type
);
2984 retval
= value_at_lazy (field_type
, msym
.value_address ());
2987 retval
= value_of_variable (sym
.symbol
, sym
.block
);
2991 gdb_assert_not_reached ("unexpected field location kind");
2997 /* Change the enclosing type of a value object VAL to NEW_ENCL_TYPE.
2998 You have to be careful here, since the size of the data area for the value
2999 is set by the length of the enclosing type. So if NEW_ENCL_TYPE is bigger
3000 than the old enclosing type, you have to allocate more space for the
3004 value::set_enclosing_type (struct type
*new_encl_type
)
3006 if (new_encl_type
->length () > enclosing_type ()->length ())
3008 check_type_length_before_alloc (new_encl_type
);
3009 m_contents
.reset ((gdb_byte
*) xrealloc (m_contents
.release (),
3010 new_encl_type
->length ()));
3013 m_enclosing_type
= new_encl_type
;
3019 value::primitive_field (LONGEST offset
, int fieldno
, struct type
*arg_type
)
3023 int unit_size
= gdbarch_addressable_memory_unit_size (arch ());
3025 arg_type
= check_typedef (arg_type
);
3026 type
= arg_type
->field (fieldno
).type ();
3028 /* Call check_typedef on our type to make sure that, if TYPE
3029 is a TYPE_CODE_TYPEDEF, its length is set to the length
3030 of the target type instead of zero. However, we do not
3031 replace the typedef type by the target type, because we want
3032 to keep the typedef in order to be able to print the type
3033 description correctly. */
3034 check_typedef (type
);
3036 if (arg_type
->field (fieldno
).bitsize ())
3038 /* Handle packed fields.
3040 Create a new value for the bitfield, with bitpos and bitsize
3041 set. If possible, arrange offset and bitpos so that we can
3042 do a single aligned read of the size of the containing type.
3043 Otherwise, adjust offset to the byte containing the first
3044 bit. Assume that the address, offset, and embedded offset
3045 are sufficiently aligned. */
3047 LONGEST bitpos
= arg_type
->field (fieldno
).loc_bitpos ();
3048 LONGEST container_bitsize
= type
->length () * 8;
3050 v
= value::allocate_lazy (type
);
3051 v
->set_bitsize (arg_type
->field (fieldno
).bitsize ());
3052 if ((bitpos
% container_bitsize
) + v
->bitsize () <= container_bitsize
3053 && type
->length () <= (int) sizeof (LONGEST
))
3054 v
->set_bitpos (bitpos
% container_bitsize
);
3056 v
->set_bitpos (bitpos
% 8);
3057 v
->set_offset ((embedded_offset ()
3059 + (bitpos
- v
->bitpos ()) / 8));
3060 v
->set_parent (this);
3064 else if (fieldno
< TYPE_N_BASECLASSES (arg_type
))
3066 /* This field is actually a base subobject, so preserve the
3067 entire object's contents for later references to virtual
3071 /* Lazy register values with offsets are not supported. */
3072 if (this->lval () == lval_register
&& lazy ())
3075 /* We special case virtual inheritance here because this
3076 requires access to the contents, which we would rather avoid
3077 for references to ordinary fields of unavailable values. */
3078 if (BASETYPE_VIA_VIRTUAL (arg_type
, fieldno
))
3079 boffset
= baseclass_offset (arg_type
, fieldno
,
3080 contents ().data (),
3085 boffset
= arg_type
->field (fieldno
).loc_bitpos () / 8;
3088 v
= value::allocate_lazy (enclosing_type ());
3091 v
= value::allocate (enclosing_type ());
3092 contents_copy_raw (v
, 0, 0, enclosing_type ()->length ());
3094 v
->deprecated_set_type (type
);
3095 v
->set_offset (this->offset ());
3096 v
->set_embedded_offset (offset
+ embedded_offset () + boffset
);
3098 else if (NULL
!= TYPE_DATA_LOCATION (type
))
3100 /* Field is a dynamic data member. */
3102 gdb_assert (0 == offset
);
3103 /* We expect an already resolved data location. */
3104 gdb_assert (TYPE_DATA_LOCATION (type
)->is_constant ());
      /* For dynamic data types, defer memory allocation
	 until we actually access the value.  */
3107 v
= value::allocate_lazy (type
);
3111 /* Plain old data member */
3112 offset
+= (arg_type
->field (fieldno
).loc_bitpos ()
3113 / (HOST_CHAR_BIT
* unit_size
));
3115 /* Lazy register values with offsets are not supported. */
3116 if (this->lval () == lval_register
&& lazy ())
3120 v
= value::allocate_lazy (type
);
3123 v
= value::allocate (type
);
3124 contents_copy_raw (v
, v
->embedded_offset (),
3125 embedded_offset () + offset
,
3126 type_length_units (type
));
3128 v
->set_offset (this->offset () + offset
+ embedded_offset ());
3130 v
->set_component_location (this);
3134 /* Given a value ARG1 of a struct or union type,
3135 extract and return the value of one of its (non-static) fields.
3136 FIELDNO says which field. */
3139 value_field (struct value
*arg1
, int fieldno
)
3141 return arg1
->primitive_field (0, fieldno
, arg1
->type ());
3144 /* Return a non-virtual function as a value.
3145 F is the list of member functions which contains the desired method.
3146 J is an index into F which provides the desired method.
3148 We only use the symbol for its address, so be happy with either a
3149 full symbol or a minimal symbol. */
3152 value_fn_field (struct value
**arg1p
, struct fn_field
*f
,
3153 int j
, struct type
*type
,
3157 struct type
*ftype
= TYPE_FN_FIELD_TYPE (f
, j
);
3158 const char *physname
= TYPE_FN_FIELD_PHYSNAME (f
, j
);
3160 bound_minimal_symbol msym
;
3162 sym
= lookup_symbol (physname
, nullptr, SEARCH_FUNCTION_DOMAIN
,
3166 msym
= lookup_minimal_symbol (current_program_space
, physname
);
3167 if (msym
.minsym
== NULL
)
3171 v
= value::allocate (ftype
);
3172 v
->set_lval (lval_memory
);
3175 v
->set_address (sym
->value_block ()->entry_pc ());
3179 /* The minimal symbol might point to a function descriptor;
3180 resolve it to the actual code address instead. */
3181 struct objfile
*objfile
= msym
.objfile
;
3182 struct gdbarch
*gdbarch
= objfile
->arch ();
3184 v
->set_address (gdbarch_convert_from_func_ptr_addr
3185 (gdbarch
, msym
.value_address (),
3186 current_inferior ()->top_target ()));
3191 if (type
!= (*arg1p
)->type ())
3192 *arg1p
= value_ind (value_cast (lookup_pointer_type (type
),
3193 value_addr (*arg1p
)));
3195 /* Move the `this' pointer according to the offset.
3196 (*arg1p)->offset () += offset; */
static LONGEST
unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
		     LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (field_type);
  ULONGEST val;
  ULONGEST valmask;
  int lsbcount;
  LONGEST bytes_read;
  LONGEST read_offset;

  /* Read the minimum number of bytes required; there may not be
     enough bytes to read an entire ULONGEST.  */
  field_type = check_typedef (field_type);
  if (bitsize)
    bytes_read = ((bitpos % 8) + bitsize + 7) / 8;
  else
    {
      bytes_read = field_type->length ();
      bitsize = 8 * bytes_read;
    }

  read_offset = bitpos / 8;

  val = extract_unsigned_integer (valaddr + read_offset,
				  bytes_read, byte_order);

  /* Extract bits.  See comment above.  */

  if (byte_order == BFD_ENDIAN_BIG)
    lsbcount = (bytes_read * 8 - bitpos % 8 - bitsize);
  else
    lsbcount = (bitpos % 8);
  val >>= lsbcount;

  /* If the field does not entirely fill a LONGEST, then zero the sign bits.
     If the field is signed, and is negative, then sign extend.  */

  if (bitsize < 8 * (int) sizeof (val))
    {
      valmask = (((ULONGEST) 1) << bitsize) - 1;
      val &= valmask;
      if (!field_type->is_unsigned ())
	{
	  if (val & (valmask ^ (valmask >> 1)))
	    val |= ~valmask;
	}
    }

  return val;
}
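
/* Worked example for the extraction above (illustrative only, not from
   the original comments; little-endian, unsigned field): BITPOS = 11
   and BITSIZE = 3 give READ_OFFSET = 1 and
   BYTES_READ = ((11 % 8) + 3 + 7) / 8 = 1, so one byte is read at
   offset 1; LSBCOUNT = 11 % 8 = 3, so that byte is shifted right by 3
   and masked with VALMASK = 0x7.  For a signed field whose extracted
   top bit is set, the final step ORs in ~VALMASK to sign-extend.  */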
3260 /* Unpack a field FIELDNO of the specified TYPE, from the object at
3261 VALADDR + EMBEDDED_OFFSET. VALADDR points to the contents of
3262 ORIGINAL_VALUE, which must not be NULL. See
3263 unpack_value_bits_as_long for more details. */
3266 unpack_value_field_as_long (struct type
*type
, const gdb_byte
*valaddr
,
3267 LONGEST embedded_offset
, int fieldno
,
3268 const struct value
*val
, LONGEST
*result
)
3270 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3271 int bitsize
= type
->field (fieldno
).bitsize ();
3272 struct type
*field_type
= type
->field (fieldno
).type ();
3275 gdb_assert (val
!= NULL
);
3277 bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3278 if (val
->bits_any_optimized_out (bit_offset
, bitsize
)
3279 || !val
->bits_available (bit_offset
, bitsize
))
3282 *result
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3287 /* Unpack a field FIELDNO of the specified TYPE, from the anonymous
3288 object at VALADDR. See unpack_bits_as_long for more details. */
3291 unpack_field_as_long (struct type
*type
, const gdb_byte
*valaddr
, int fieldno
)
3293 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3294 int bitsize
= type
->field (fieldno
).bitsize ();
3295 struct type
*field_type
= type
->field (fieldno
).type ();
3297 return unpack_bits_as_long (field_type
, valaddr
, bitpos
, bitsize
);
3303 value::unpack_bitfield (struct value
*dest_val
,
3304 LONGEST bitpos
, LONGEST bitsize
,
3305 const gdb_byte
*valaddr
, LONGEST embedded_offset
)
3308 enum bfd_endian byte_order
;
3311 struct type
*field_type
= dest_val
->type ();
3313 byte_order
= type_byte_order (field_type
);
3315 /* First, unpack and sign extend the bitfield as if it was wholly
3316 valid. Optimized out/unavailable bits are read as zero, but
3317 that's OK, as they'll end up marked below. If the VAL is
3318 wholly-invalid we may have skipped allocating its contents,
3319 though. See value::allocate_optimized_out. */
3320 if (valaddr
!= NULL
)
3324 num
= unpack_bits_as_long (field_type
, valaddr
+ embedded_offset
,
3326 store_signed_integer (dest_val
->contents_raw ().data (),
3327 field_type
->length (), byte_order
, num
);
3330 /* Now copy the optimized out / unavailability ranges to the right
3332 src_bit_offset
= embedded_offset
* TARGET_CHAR_BIT
+ bitpos
;
3333 if (byte_order
== BFD_ENDIAN_BIG
)
3334 dst_bit_offset
= field_type
->length () * TARGET_CHAR_BIT
- bitsize
;
3337 ranges_copy_adjusted (dest_val
, dst_bit_offset
, src_bit_offset
, bitsize
);
3340 /* Return a new value with type TYPE, which is FIELDNO field of the
3341 object at VALADDR + EMBEDDEDOFFSET. VALADDR points to the contents
3342 of VAL. If the VAL's contents required to extract the bitfield
3343 from are unavailable/optimized out, the new value is
3344 correspondingly marked unavailable/optimized out. */
3347 value_field_bitfield (struct type
*type
, int fieldno
,
3348 const gdb_byte
*valaddr
,
3349 LONGEST embedded_offset
, const struct value
*val
)
3351 int bitpos
= type
->field (fieldno
).loc_bitpos ();
3352 int bitsize
= type
->field (fieldno
).bitsize ();
3353 struct value
*res_val
= value::allocate (type
->field (fieldno
).type ());
3355 val
->unpack_bitfield (res_val
, bitpos
, bitsize
, valaddr
, embedded_offset
);
/* Modify the value of a bitfield.  ADDR points to a block of memory in
   target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
   is the desired value of the field, in host byte order.  BITPOS and BITSIZE
   indicate which bits (in target bit order) comprise the bitfield.
   Requires 0 < BITSIZE <= lbits, 0 <= BITPOS % 8 + BITSIZE <= lbits, and
   0 <= BITPOS, where lbits is the size of a LONGEST in bits.  */

void
modify_field (struct type *type, gdb_byte *addr,
	      LONGEST fieldval, LONGEST bitpos, LONGEST bitsize)
{
  enum bfd_endian byte_order = type_byte_order (type);
  ULONGEST oword;
  ULONGEST mask = (ULONGEST) -1 >> (8 * sizeof (ULONGEST) - bitsize);
  LONGEST bytesize;

  /* Normalize BITPOS.  */
  addr += bitpos / 8;
  bitpos %= 8;

  /* If a negative fieldval fits in the field in question, chop
     off the sign extension bits.  */
  if ((~fieldval & ~(mask >> 1)) == 0)
    fieldval &= mask;

  /* Warn if value is too big to fit in the field in question.  */
  if (0 != (fieldval & ~mask))
    {
      /* FIXME: would like to include fieldval in the message, but
	 we don't have a sprintf_longest.  */
      warning (_("Value does not fit in %s bits."), plongest (bitsize));

      /* Truncate it, otherwise adjoining fields may be corrupted.  */
      fieldval &= mask;
    }

  /* Ensure no bytes outside of the modified ones get accessed as it may cause
     false valgrind reports.  */
  bytesize = (bitpos + bitsize + 7) / 8;
  oword = extract_unsigned_integer (addr, bytesize, byte_order);

  /* Shifting for bit field depends on endianness of the target machine.  */
  if (byte_order == BFD_ENDIAN_BIG)
    bitpos = bytesize * 8 - bitpos - bitsize;

  oword &= ~(mask << bitpos);
  oword |= fieldval << bitpos;

  store_unsigned_integer (addr, bytesize, byte_order, oword);
}
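
/* Worked example for modify_field (illustrative only, not from the
   original comments; little-endian target): with BITPOS = 11 and
   BITSIZE = 3, normalization advances ADDR by one byte and leaves
   BITPOS = 3; MASK = 0x7 and BYTESIZE = (3 + 3 + 7) / 8 = 1, so one
   byte is read back, bits 3..5 are cleared with ~(0x7 << 3),
   FIELDVAL << 3 is ORed in, and the byte is written back.  On a
   big-endian target BITPOS would first be mirrored to 8 - 3 - 3 = 2.  */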
3412 /* Pack NUM into BUF using a target format of TYPE. */
3415 pack_long (gdb_byte
*buf
, struct type
*type
, LONGEST num
)
3417 enum bfd_endian byte_order
= type_byte_order (type
);
3420 type
= check_typedef (type
);
3421 len
= type
->length ();
3423 switch (type
->code ())
3425 case TYPE_CODE_RANGE
:
3426 num
-= type
->bounds ()->bias
;
3429 case TYPE_CODE_CHAR
:
3430 case TYPE_CODE_ENUM
:
3431 case TYPE_CODE_FLAGS
:
3432 case TYPE_CODE_BOOL
:
3433 case TYPE_CODE_MEMBERPTR
:
3434 if (type
->bit_size_differs_p ())
3436 unsigned bit_off
= type
->bit_offset ();
3437 unsigned bit_size
= type
->bit_size ();
3438 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3441 store_signed_integer (buf
, len
, byte_order
, num
);
3445 case TYPE_CODE_RVALUE_REF
:
3447 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3451 case TYPE_CODE_DECFLOAT
:
3452 target_float_from_longest (buf
, type
, num
);
3456 error (_("Unexpected type (%d) encountered for integer constant."),
3462 /* Pack NUM into BUF using a target format of TYPE. */
3465 pack_unsigned_long (gdb_byte
*buf
, struct type
*type
, ULONGEST num
)
3468 enum bfd_endian byte_order
;
3470 type
= check_typedef (type
);
3471 len
= type
->length ();
3472 byte_order
= type_byte_order (type
);
3474 switch (type
->code ())
3477 case TYPE_CODE_CHAR
:
3478 case TYPE_CODE_ENUM
:
3479 case TYPE_CODE_FLAGS
:
3480 case TYPE_CODE_BOOL
:
3481 case TYPE_CODE_RANGE
:
3482 case TYPE_CODE_MEMBERPTR
:
3483 if (type
->bit_size_differs_p ())
3485 unsigned bit_off
= type
->bit_offset ();
3486 unsigned bit_size
= type
->bit_size ();
3487 num
&= ((ULONGEST
) 1 << bit_size
) - 1;
3490 store_unsigned_integer (buf
, len
, byte_order
, num
);
3494 case TYPE_CODE_RVALUE_REF
:
3496 store_typed_address (buf
, type
, (CORE_ADDR
) num
);
3500 case TYPE_CODE_DECFLOAT
:
3501 target_float_from_ulongest (buf
, type
, num
);
3505 error (_("Unexpected type (%d) encountered "
3506 "for unsigned integer constant."),
struct value *
value::zero (struct type *type, enum lval_type lv)
{
  struct value *val = value::allocate_lazy (type);

  val->set_lval (lv == lval_computed ? not_lval : lv);
  val->m_is_zero = true;

  return val;
}

/* Convert C numbers into newly allocated values.  */

struct value *
value_from_longest (struct type *type, LONGEST num)
{
  struct value *val = value::allocate (type);

  pack_long (val->contents_raw ().data (), type, num);

  return val;
}

/* Convert C unsigned numbers into newly allocated values.  */

struct value *
value_from_ulongest (struct type *type, ULONGEST num)
{
  struct value *val = value::allocate (type);

  pack_unsigned_long (val->contents_raw ().data (), type, num);

  return val;
}
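
/* Illustrative usage (not from the original comments):

     value_from_longest (builtin_type (gdbarch)->builtin_int, 1)

   builds the target-format `int' value 1; this is the same pattern
   isvoid_internal_fn uses below to return its result.  */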
3550 value_from_mpz (struct type
*type
, const gdb_mpz
&v
)
3552 struct type
*real_type
= check_typedef (type
);
3554 const gdb_mpz
*val
= &v
;
3556 if (real_type
->code () == TYPE_CODE_RANGE
&& type
->bounds ()->bias
!= 0)
3560 storage
-= type
->bounds ()->bias
;
3563 if (type
->bit_size_differs_p ())
3565 unsigned bit_off
= type
->bit_offset ();
3566 unsigned bit_size
= type
->bit_size ();
3568 if (val
!= &storage
)
3574 storage
.mask (bit_size
);
3575 storage
<<= bit_off
;
3578 struct value
*result
= value::allocate (type
);
3579 val
->truncate (result
->contents_raw (), type_byte_order (type
),
3580 type
->is_unsigned ());
/* Create a value representing a pointer of type TYPE to the address
   ADDR.  */

struct value *
value_from_pointer (struct type *type, CORE_ADDR addr)
{
  struct value *val = value::allocate (type);

  store_typed_address (val->contents_raw ().data (),
		       check_typedef (type), addr);
  return val;
}

/* Create and return a value object of TYPE containing the value D.  The
   TYPE must be of TYPE_CODE_FLT, and must be large enough to hold D once
   it is converted to target format.  */

struct value *
value_from_host_double (struct type *type, double d)
{
  struct value *value = value::allocate (type);

  gdb_assert (type->code () == TYPE_CODE_FLT);
  target_float_from_host_double (value->contents_raw ().data (),
				 value->type (), d);
  return value;
}

/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE.  Make sure to retrieve the value's new type after this call.
   Note that TYPE is not passed through resolve_dynamic_type; this is
   a special API intended for use only by Ada.  */

struct value *
value_from_contents_and_address_unresolved (struct type *type,
					    const gdb_byte *valaddr,
					    CORE_ADDR address)
{
  struct value *v;

  if (valaddr == NULL)
    v = value::allocate_lazy (type);
  else
    v = value_from_contents (type, valaddr);
  v->set_lval (lval_memory);
  v->set_address (address);
  return v;
}

/* Create a value of type TYPE whose contents come from VALADDR, if it
   is non-null, and whose memory address (in the inferior) is
   ADDRESS.  The type of the created value may differ from the passed
   type TYPE.  Make sure to retrieve the value's new type after this call.  */

struct value *
value_from_contents_and_address (struct type *type,
				 const gdb_byte *valaddr,
				 CORE_ADDR address,
				 const frame_info_ptr &frame)
{
3645 gdb::array_view
<const gdb_byte
> view
;
3646 if (valaddr
!= nullptr)
3647 view
= gdb::make_array_view (valaddr
, type
->length ());
3648 struct type
*resolved_type
= resolve_dynamic_type (type
, view
, address
,
3650 struct type
*resolved_type_no_typedef
= check_typedef (resolved_type
);
3653 if (resolved_type_no_typedef
->code () == TYPE_CODE_ARRAY
3654 && resolved_type_no_typedef
->bound_optimized_out ())
3656 /* Resolution found that the bounds are optimized out. In this
3657 case, mark the array itself as optimized-out. */
3658 v
= value::allocate_optimized_out (resolved_type
);
3660 else if (valaddr
== nullptr)
3661 v
= value::allocate_lazy (resolved_type
);
3663 v
= value_from_contents (resolved_type
, valaddr
);
3664 if (TYPE_DATA_LOCATION (resolved_type_no_typedef
) != NULL
3665 && TYPE_DATA_LOCATION (resolved_type_no_typedef
)->is_constant ())
3666 address
= TYPE_DATA_LOCATION_ADDR (resolved_type_no_typedef
);
3667 v
->set_lval (lval_memory
);
3668 v
->set_address (address
);
/* Create a value of type TYPE holding the contents CONTENTS.
   The new value is `not_lval'.  */

struct value *
value_from_contents (struct type *type, const gdb_byte *contents)
{
  struct value *result;

  result = value::allocate (type);
  memcpy (result->contents_raw ().data (), contents, type->length ());
  return result;
}
3685 /* Extract a value from the history file. Input will be of the form
3686 $digits or $$digits. See block comment above 'write_dollar_variable'
3690 value_from_history_ref (const char *h
, const char **endp
)
3702 /* Find length of numeral string. */
3703 for (; isdigit (h
[len
]); len
++)
3706 /* Make sure numeral string is not part of an identifier. */
3707 if (h
[len
] == '_' || isalpha (h
[len
]))
3710 /* Now collect the index value. */
3715 /* For some bizarre reason, "$$" is equivalent to "$$1",
3716 rather than to "$$0" as it ought to be! */
3724 index
= -strtol (&h
[2], &local_end
, 10);
3732 /* "$" is equivalent to "$0". */
3740 index
= strtol (&h
[1], &local_end
, 10);
3745 return access_value_history (index
);
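
/* Illustrative examples of the history syntax accepted above (not from
   the original comments): "$" and "$0" name the most recent history
   item, "$5" is absolute item 5, "$$" is parsed as "$$1" (the
   next-to-last item), and "$$3" produces index -3, i.e. three items
   back.  */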
3748 /* Get the component value (offset by OFFSET bytes) of a struct or
3749 union WHOLE. Component's type is TYPE. */
3752 value_from_component (struct value
*whole
, struct type
*type
, LONGEST offset
)
3756 if (whole
->lval () == lval_memory
&& whole
->lazy ())
3757 v
= value::allocate_lazy (type
);
3760 v
= value::allocate (type
);
3761 whole
->contents_copy (v
, v
->embedded_offset (),
3762 whole
->embedded_offset () + offset
,
3763 type_length_units (type
));
3765 v
->set_offset (whole
->offset () + offset
+ whole
->embedded_offset ());
3766 v
->set_component_location (whole
);
3774 value::from_component_bitsize (struct type
*type
,
3775 LONGEST bit_offset
, LONGEST bit_length
)
3777 gdb_assert (!lazy ());
3779 /* Preserve lvalue-ness if possible. This is needed to avoid
3780 array-printing failures (including crashes) when printing Ada
3781 arrays in programs compiled with -fgnat-encodings=all. */
3782 if ((bit_offset
% TARGET_CHAR_BIT
) == 0
3783 && (bit_length
% TARGET_CHAR_BIT
) == 0
3784 && bit_length
== TARGET_CHAR_BIT
* type
->length ())
3785 return value_from_component (this, type
, bit_offset
/ TARGET_CHAR_BIT
);
3787 struct value
*v
= value::allocate (type
);
3789 LONGEST dst_offset
= TARGET_CHAR_BIT
* v
->embedded_offset ();
3790 if (is_scalar_type (type
) && type_byte_order (type
) == BFD_ENDIAN_BIG
)
3791 dst_offset
+= TARGET_CHAR_BIT
* type
->length () - bit_length
;
3793 contents_copy_raw_bitwise (v
, dst_offset
,
3795 * embedded_offset ()
3802 coerce_ref_if_computed (const struct value
*arg
)
3804 const struct lval_funcs
*funcs
;
3806 if (!TYPE_IS_REFERENCE (check_typedef (arg
->type ())))
3809 if (arg
->lval () != lval_computed
)
3812 funcs
= arg
->computed_funcs ();
3813 if (funcs
->coerce_ref
== NULL
)
3816 return funcs
->coerce_ref (arg
);
3819 /* Look at value.h for description. */
3822 readjust_indirect_value_type (struct value
*value
, struct type
*enc_type
,
3823 const struct type
*original_type
,
3824 struct value
*original_value
,
3825 CORE_ADDR original_value_address
)
3827 gdb_assert (original_type
->is_pointer_or_reference ());
3829 struct type
*original_target_type
= original_type
->target_type ();
3830 gdb::array_view
<const gdb_byte
> view
;
3831 struct type
*resolved_original_target_type
3832 = resolve_dynamic_type (original_target_type
, view
,
3833 original_value_address
);
3835 /* Re-adjust type. */
3836 value
->deprecated_set_type (resolved_original_target_type
);
3838 /* Add embedding info. */
3839 value
->set_enclosing_type (enc_type
);
3840 value
->set_embedded_offset (original_value
->pointed_to_offset ());
3842 /* We may be pointing to an object of some derived type. */
3843 return value_full_object (value
, NULL
, 0, 0, 0);
3847 coerce_ref (struct value
*arg
)
3849 struct type
*value_type_arg_tmp
= check_typedef (arg
->type ());
3850 struct value
*retval
;
3851 struct type
*enc_type
;
3853 retval
= coerce_ref_if_computed (arg
);
3857 if (!TYPE_IS_REFERENCE (value_type_arg_tmp
))
3860 enc_type
= check_typedef (arg
->enclosing_type ());
3861 enc_type
= enc_type
->target_type ();
3863 CORE_ADDR addr
= unpack_pointer (arg
->type (), arg
->contents ().data ());
3864 retval
= value_at_lazy (enc_type
, addr
);
3865 enc_type
= retval
->type ();
3866 return readjust_indirect_value_type (retval
, enc_type
, value_type_arg_tmp
,
3871 coerce_array (struct value
*arg
)
3875 arg
= coerce_ref (arg
);
3876 type
= check_typedef (arg
->type ());
3878 switch (type
->code ())
3880 case TYPE_CODE_ARRAY
:
3881 if (!type
->is_vector () && current_language
->c_style_arrays_p ())
3882 arg
= value_coerce_array (arg
);
3884 case TYPE_CODE_FUNC
:
3885 arg
= value_coerce_function (arg
);
3892 /* Return the return value convention that will be used for the
3895 enum return_value_convention
3896 struct_return_convention (struct gdbarch
*gdbarch
,
3897 struct value
*function
, struct type
*value_type
)
3899 enum type_code code
= value_type
->code ();
3901 if (code
== TYPE_CODE_ERROR
)
3902 error (_("Function return type unknown."));
3904 /* Probe the architecture for the return-value convention. */
3905 return gdbarch_return_value_as_value (gdbarch
, function
, value_type
,
3909 /* Return true if the function returning the specified type is using
3910 the convention of returning structures in memory (passing in the
3911 address as a hidden first parameter). */
3914 using_struct_return (struct gdbarch
*gdbarch
,
3915 struct value
*function
, struct type
*value_type
)
3917 if (value_type
->code () == TYPE_CODE_VOID
)
3918 /* A void return value is never in memory. See also corresponding
3919 code in "print_return_value". */
3922 return (struct_return_convention (gdbarch
, function
, value_type
)
3923 != RETURN_VALUE_REGISTER_CONVENTION
);
3929 value::fetch_lazy_bitfield ()
3931 gdb_assert (bitsize () != 0);
3933 /* To read a lazy bitfield, read the entire enclosing value. This
3934 prevents reading the same block of (possibly volatile) memory once
3935 per bitfield. It would be even better to read only the containing
3936 word, but we have no way to record that just specific bits of a
3937 value have been fetched. */
3938 struct value
*parent
= this->parent ();
3940 if (parent
->lazy ())
3941 parent
->fetch_lazy ();
3943 parent
->unpack_bitfield (this, bitpos (), bitsize (),
3944 parent
->contents_for_printing ().data (),
3951 value::fetch_lazy_memory ()
3953 gdb_assert (m_lval
== lval_memory
);
3955 CORE_ADDR addr
= address ();
3956 struct type
*type
= check_typedef (enclosing_type ());
3958 /* Figure out how much we should copy from memory. Usually, this is just
3959 the size of the type, but, for arrays, we might only be loading a
3960 small part of the array (this is only done for very large arrays). */
3962 if (m_limited_length
> 0)
3964 gdb_assert (this->type ()->code () == TYPE_CODE_ARRAY
);
3965 len
= m_limited_length
;
3967 else if (type
->length () > 0)
3968 len
= type_length_units (type
);
3970 gdb_assert (len
>= 0);
3973 read_value_memory (this, 0, stack (), addr
,
3974 contents_all_raw ().data (), len
);
3976 /* If only part of an array was loaded, mark the rest as unavailable. */
3977 if (m_limited_length
> 0)
3978 mark_bytes_unavailable (m_limited_length
,
3979 m_enclosing_type
->length () - m_limited_length
);
3985 value::fetch_lazy_register ()
3987 struct type
*type
= check_typedef (this->type ());
3988 struct value
*new_val
= this;
3990 scoped_value_mark mark
;
3992 /* Offsets are not supported here; lazy register values must
3993 refer to the entire register. */
3994 gdb_assert (offset () == 0);
3996 while (new_val
->lval () == lval_register
&& new_val
->lazy ())
3998 frame_id next_frame_id
= new_val
->next_frame_id ();
3999 frame_info_ptr next_frame
= frame_find_by_id (next_frame_id
);
4000 gdb_assert (next_frame
!= NULL
);
4002 int regnum
= new_val
->regnum ();
4004 /* Convertible register routines are used for multi-register
4005 values and for interpretation in different types
4006 (e.g. float or int from a double register). Lazy
4007 register values should have the register's natural type,
4008 so they do not apply. */
4009 gdb_assert (!gdbarch_convert_register_p (get_frame_arch (next_frame
),
4012 new_val
= frame_unwind_register_value (next_frame
, regnum
);
4014 /* If we get another lazy lval_register value, it means the
4015 register is found by reading it from NEXT_FRAME's next frame.
4016 frame_unwind_register_value should never return a value with
4017 the frame id pointing to NEXT_FRAME. If it does, it means we
4018 either have two consecutive frames with the same frame id
4019 in the frame chain, or some code is trying to unwind
4020 behind get_prev_frame's back (e.g., a frame unwind
4021 sniffer trying to unwind), bypassing its validations. In
4022 any case, it should always be an internal error to end up
4023 in this situation. */
4024 if (new_val
->lval () == lval_register
4026 && new_val
->next_frame_id () == next_frame_id
)
4027 internal_error (_("infinite loop while fetching a register"));
4030 /* If it's still lazy (for instance, a saved register on the
4031 stack), fetch it. */
4032 if (new_val
->lazy ())
4033 new_val
->fetch_lazy ();
4035 /* Copy the contents and the unavailability/optimized-out
4036 meta-data from NEW_VAL to VAL. */
4038 new_val
->contents_copy (this, embedded_offset (),
4039 new_val
->embedded_offset (),
4040 type_length_units (type
));
4044 frame_info_ptr frame
= frame_find_by_id (this->next_frame_id ());
4045 frame
= get_prev_frame_always (frame
);
4046 int regnum
= this->regnum ();
4047 gdbarch
*gdbarch
= get_frame_arch (frame
);
4049 string_file debug_file
;
4050 gdb_printf (&debug_file
,
4051 "(frame=%d, regnum=%d(%s), ...) ",
4052 frame_relative_level (frame
), regnum
,
4053 user_reg_map_regnum_to_name (gdbarch
, regnum
));
4055 gdb_printf (&debug_file
, "->");
4056 if (new_val
->optimized_out ())
4058 gdb_printf (&debug_file
, " ");
4059 val_print_optimized_out (new_val
, &debug_file
);
4063 if (new_val
->lval () == lval_register
)
4064 gdb_printf (&debug_file
, " register=%d", new_val
->regnum ());
4065 else if (new_val
->lval () == lval_memory
)
4066 gdb_printf (&debug_file
, " address=%s",
4068 new_val
->address ()));
4070 gdb_printf (&debug_file
, " computed");
4072 if (new_val
->entirely_available ())
4075 gdb::array_view
<const gdb_byte
> buf
= new_val
->contents ();
4077 gdb_printf (&debug_file
, " bytes=");
4078 gdb_printf (&debug_file
, "[");
4079 for (i
= 0; i
< register_size (gdbarch
, regnum
); i
++)
4080 gdb_printf (&debug_file
, "%02x", buf
[i
]);
4081 gdb_printf (&debug_file
, "]");
4083 else if (new_val
->entirely_unavailable ())
4084 gdb_printf (&debug_file
, " unavailable");
4086 gdb_printf (&debug_file
, " partly unavailable");
4089 frame_debug_printf ("%s", debug_file
.c_str ());
4096 value::fetch_lazy ()
4098 gdb_assert (lazy ());
4099 allocate_contents (true);
4100 /* A value is either lazy, or fully fetched. The
4101 availability/validity is only established as we try to fetch a
4103 gdb_assert (m_optimized_out
.empty ());
4104 gdb_assert (m_unavailable
.empty ());
4109 else if (bitsize ())
4110 fetch_lazy_bitfield ();
4111 else if (this->lval () == lval_memory
)
4112 fetch_lazy_memory ();
4113 else if (this->lval () == lval_register
)
4114 fetch_lazy_register ();
4115 else if (this->lval () == lval_computed
4116 && computed_funcs ()->read
!= NULL
)
4117 computed_funcs ()->read (this);
4119 internal_error (_("Unexpected lazy value type."));
4127 pseudo_from_raw_part (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4128 int raw_reg_num
, int raw_offset
)
4130 value
*pseudo_reg_val
4131 = value::allocate_register (next_frame
, pseudo_reg_num
);
4132 value
*raw_reg_val
= value_of_register (raw_reg_num
, next_frame
);
4133 raw_reg_val
->contents_copy (pseudo_reg_val
, 0, raw_offset
,
4134 pseudo_reg_val
->type ()->length ());
4135 return pseudo_reg_val
;
4141 pseudo_to_raw_part (const frame_info_ptr
&next_frame
,
4142 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4143 int raw_reg_num
, int raw_offset
)
4146 = register_size (frame_unwind_arch (next_frame
), raw_reg_num
);
4148 /* When overflowing a register, put_frame_register_bytes writes to the
4149 subsequent registers. We don't want that behavior here, so make sure
4150 the write is wholly within register RAW_REG_NUM. */
4151 gdb_assert (raw_offset
+ pseudo_buf
.size () <= raw_reg_size
);
4152 put_frame_register_bytes (next_frame
, raw_reg_num
, raw_offset
, pseudo_buf
);
4158 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4159 int raw_reg_1_num
, int raw_reg_2_num
)
4161 value
*pseudo_reg_val
4162 = value::allocate_register (next_frame
, pseudo_reg_num
);
4165 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4166 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4167 raw_reg_1_val
->type ()->length ());
4168 dst_offset
+= raw_reg_1_val
->type ()->length ();
4170 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4171 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4172 raw_reg_2_val
->type ()->length ());
4173 dst_offset
+= raw_reg_2_val
->type ()->length ();
4175 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4177 return pseudo_reg_val
;
4183 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4184 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4185 int raw_reg_1_num
, int raw_reg_2_num
)
4188 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4190 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4191 put_frame_register (next_frame
, raw_reg_1_num
,
4192 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4193 src_offset
+= raw_reg_1_size
;
4195 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4196 put_frame_register (next_frame
, raw_reg_2_num
,
4197 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4198 src_offset
+= raw_reg_2_size
;
4200 gdb_assert (src_offset
== pseudo_buf
.size ());
4206 pseudo_from_concat_raw (const frame_info_ptr
&next_frame
, int pseudo_reg_num
,
4207 int raw_reg_1_num
, int raw_reg_2_num
,
4210 value
*pseudo_reg_val
4211 = value::allocate_register (next_frame
, pseudo_reg_num
);
4214 value
*raw_reg_1_val
= value_of_register (raw_reg_1_num
, next_frame
);
4215 raw_reg_1_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4216 raw_reg_1_val
->type ()->length ());
4217 dst_offset
+= raw_reg_1_val
->type ()->length ();
4219 value
*raw_reg_2_val
= value_of_register (raw_reg_2_num
, next_frame
);
4220 raw_reg_2_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4221 raw_reg_2_val
->type ()->length ());
4222 dst_offset
+= raw_reg_2_val
->type ()->length ();
4224 value
*raw_reg_3_val
= value_of_register (raw_reg_3_num
, next_frame
);
4225 raw_reg_3_val
->contents_copy (pseudo_reg_val
, dst_offset
, 0,
4226 raw_reg_3_val
->type ()->length ());
4227 dst_offset
+= raw_reg_3_val
->type ()->length ();
4229 gdb_assert (dst_offset
== pseudo_reg_val
->type ()->length ());
4231 return pseudo_reg_val
;
4237 pseudo_to_concat_raw (const frame_info_ptr
&next_frame
,
4238 gdb::array_view
<const gdb_byte
> pseudo_buf
,
4239 int raw_reg_1_num
, int raw_reg_2_num
, int raw_reg_3_num
)
4242 gdbarch
*arch
= frame_unwind_arch (next_frame
);
4244 int raw_reg_1_size
= register_size (arch
, raw_reg_1_num
);
4245 put_frame_register (next_frame
, raw_reg_1_num
,
4246 pseudo_buf
.slice (src_offset
, raw_reg_1_size
));
4247 src_offset
+= raw_reg_1_size
;
4249 int raw_reg_2_size
= register_size (arch
, raw_reg_2_num
);
4250 put_frame_register (next_frame
, raw_reg_2_num
,
4251 pseudo_buf
.slice (src_offset
, raw_reg_2_size
));
4252 src_offset
+= raw_reg_2_size
;
4254 int raw_reg_3_size
= register_size (arch
, raw_reg_3_num
);
4255 put_frame_register (next_frame
, raw_reg_3_num
,
4256 pseudo_buf
.slice (src_offset
, raw_reg_3_size
));
4257 src_offset
+= raw_reg_3_size
;
4259 gdb_assert (src_offset
== pseudo_buf
.size ());
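
/* Illustrative example (not from the original comments): for a target
   whose 32-byte pseudo register is backed by two 16-byte raw
   registers, pseudo_from_concat_raw copies raw register 1 into bytes
   [0, 16) of the pseudo value and raw register 2 into bytes [16, 32);
   pseudo_to_concat_raw performs the matching split when the pseudo
   register is written.  */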

/* Implementation of the convenience function $_isvoid.  */

static struct value *
isvoid_internal_fn (struct gdbarch *gdbarch,
		    const struct language_defn *language,
		    void *cookie, int argc, struct value **argv)
{
  int ret;

  if (argc != 1)
    error (_("You must provide one argument for $_isvoid."));

  ret = argv[0]->type ()->code () == TYPE_CODE_VOID;

  return value_from_longest (builtin_type (gdbarch)->builtin_int, ret);
}
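
/* Illustrative usage (not from the original comments):
   "print $_isvoid ($never_assigned)" prints 1, because a convenience
   variable that has never been assigned has void type, while
   "print $_isvoid (1)" prints 0.  */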
4279 /* Implementation of the convenience function $_creal. Extracts the
4280 real part from a complex number. */
4282 static struct value
*
4283 creal_internal_fn (struct gdbarch
*gdbarch
,
4284 const struct language_defn
*language
,
4285 void *cookie
, int argc
, struct value
**argv
,
4289 error (_("You must provide one argument for $_creal."));
4291 value
*cval
= argv
[0];
4292 type
*ctype
= check_typedef (cval
->type ());
4293 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4294 error (_("expected a complex number"));
4295 if (noside
== EVAL_AVOID_SIDE_EFFECTS
)
4296 return value::zero (ctype
->target_type (), not_lval
);
4297 return value_real_part (cval
);
4300 /* Implementation of the convenience function $_cimag. Extracts the
4301 imaginary part from a complex number. */
4303 static struct value
*
4304 cimag_internal_fn (struct gdbarch
*gdbarch
,
4305 const struct language_defn
*language
,
4306 void *cookie
, int argc
,
4307 struct value
**argv
, enum noside noside
)
4310 error (_("You must provide one argument for $_cimag."));
4312 value
*cval
= argv
[0];
4313 type
*ctype
= check_typedef (cval
->type ());
4314 if (ctype
->code () != TYPE_CODE_COMPLEX
)
4315 error (_("expected a complex number"));
4316 if (noside
== EVAL_AVOID_SIDE_EFFECTS
)
4317 return value::zero (ctype
->target_type (), not_lval
);
4318 return value_imaginary_part (cval
);
4325 /* Test the ranges_contain function. */
4328 test_ranges_contain ()
4330 std::vector
<range
> ranges
;
4336 ranges
.push_back (r
);
4341 ranges
.push_back (r
);
4344 SELF_CHECK (!ranges_contain (ranges
, 2, 5));
4346 SELF_CHECK (ranges_contain (ranges
, 9, 5));
4348 SELF_CHECK (ranges_contain (ranges
, 10, 2));
4350 SELF_CHECK (ranges_contain (ranges
, 10, 5));
4352 SELF_CHECK (ranges_contain (ranges
, 13, 6));
4354 SELF_CHECK (ranges_contain (ranges
, 14, 5));
4356 SELF_CHECK (!ranges_contain (ranges
, 15, 4));
4358 SELF_CHECK (!ranges_contain (ranges
, 16, 4));
4360 SELF_CHECK (ranges_contain (ranges
, 16, 6));
4362 SELF_CHECK (ranges_contain (ranges
, 21, 1));
4364 SELF_CHECK (ranges_contain (ranges
, 21, 5));
4366 SELF_CHECK (!ranges_contain (ranges
, 26, 3));
4369 /* Check that RANGES contains the same ranges as EXPECTED. */
4372 check_ranges_vector (gdb::array_view
<const range
> ranges
,
4373 gdb::array_view
<const range
> expected
)
4375 return ranges
== expected
;
/* Test the insert_into_bit_range_vector function.  */

static void
test_insert_into_bit_range_vector ()
{
  std::vector<range> ranges;

  /* [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 10, 5);
    static const range expected[] = {
      {10, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] */
  {
    insert_into_bit_range_vector (&ranges, 11, 4);
    static const range expected = {10, 5};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [20, 24] */
  {
    insert_into_bit_range_vector (&ranges, 20, 5);
    static const range expected[] = {
      {10, 5},
      {20, 5},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 17, 5);
    static const range expected[] = {
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 8] [10, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 2, 7);
    static const range expected[] = {
      {2, 7},
      {10, 5},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 14] [17, 24] */
  {
    insert_into_bit_range_vector (&ranges, 9, 1);
    static const range expected[] = {
      {2, 13},
      {17, 8},
    };
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }

  /* [2, 33] */
  {
    insert_into_bit_range_vector (&ranges, 4, 30);
    static const range expected = {2, 32};
    SELF_CHECK (check_ranges_vector (ranges, expected));
  }
}
/* Test value::copy.  */

static void
test_value_copy ()
{
  type *type = builtin_type (current_inferior ()->arch ())->builtin_int;

  /* Verify that we can copy an entirely optimized out value, that may not
     have its contents allocated.  */
  value_ref_ptr val = release_value (value::allocate_optimized_out (type));
  value_ref_ptr copy = release_value (val->copy ());

  SELF_CHECK (val->entirely_optimized_out ());
  SELF_CHECK (copy->entirely_optimized_out ());
}

} /* namespace selftests */
#endif /* GDB_SELF_TEST */
void _initialize_values ();
void
_initialize_values ()
{
  cmd_list_element *show_convenience_cmd
    = add_cmd ("convenience", no_class, show_convenience, _("\
Debugger convenience (\"$foo\") variables and functions.\n\
Convenience variables are created when you assign them values;\n\
thus, \"set $foo=1\" gives \"$foo\" the value 1.  Values may be any type.\n\
\n\
A few convenience variables are given values automatically:\n\
\"$_\" holds the last address examined with \"x\" or \"info lines\",\n\
\"$__\" holds the contents of the last address examined with \"x\"."
#ifdef HAVE_PYTHON
"\n\n\
Convenience functions are defined via the Python API."
#endif
	       ), &showlist);
  add_alias_cmd ("conv", show_convenience_cmd, no_class, 1, &showlist);
  add_cmd ("values", no_set_class, show_values, _("\
Elements of value history around item number IDX (or last ten)."),
	   &showlist);

  add_com ("init-if-undefined", class_vars, init_if_undefined_command, _("\
Initialize a convenience variable if necessary.\n\
init-if-undefined VARIABLE = EXPRESSION\n\
Set an internal VARIABLE to the result of the EXPRESSION if it does not\n\
exist or does not contain a value.  The EXPRESSION is not evaluated if the\n\
VARIABLE is already initialized."));
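
  /* Illustrative usage (example, not from the sources): a user script can
     provide a default without clobbering an existing setting, e.g.

       (gdb) init-if-undefined $hit_count = 0

     only assigns $hit_count if it does not yet hold a value.  */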
  add_prefix_cmd ("function", no_class, function_command, _("\
Placeholder command for showing help on convenience functions."),
		  &functionlist, 0, &cmdlist);
  add_internal_function ("_isvoid", _("\
Check whether an expression is void.\n\
Usage: $_isvoid (expression)\n\
Return 1 if the expression is void, zero otherwise."),
			 isvoid_internal_fn, NULL);

  add_internal_function ("_creal", _("\
Extract the real part of a complex number.\n\
Usage: $_creal (expression)\n\
Return the real part of a complex number; the result type depends on\n\
the type of the complex number."),
			 creal_internal_fn, NULL);

  add_internal_function ("_cimag", _("\
Extract the imaginary part of a complex number.\n\
Usage: $_cimag (expression)\n\
Return the imaginary part of a complex number; the result type depends on\n\
the type of the complex number."),
			 cimag_internal_fn, NULL);
  add_setshow_zuinteger_unlimited_cmd ("max-value-size",
				       class_support, &max_value_size, _("\
Set the maximum size of a value gdb will load from the inferior."), _("\
Show the maximum size of a value gdb will load from the inferior."), _("\
Use this to control the maximum size, in bytes, of a value that gdb\n\
will load from the inferior.  Setting this value to 'unlimited'\n\
disables checking.\n\
Setting this does not invalidate already allocated values, it only\n\
prevents future values, larger than this size, from being allocated."),
				       set_max_value_size,
				       show_max_value_size,
				       &setlist, &showlist);
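
  /* Illustrative usage (example, not from the sources):

       (gdb) show max-value-size
       (gdb) set max-value-size unlimited

     where 'unlimited' disables the size check, as described in the help
     text above.  */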
  set_show_commands vsize_limit
    = add_setshow_zuinteger_unlimited_cmd ("varsize-limit", class_support,
					   &max_value_size, _("\
Set the maximum number of bytes allowed in a variable-size object."), _("\
Show the maximum number of bytes allowed in a variable-size object."), _("\
Attempts to access an object whose size is not a compile-time constant\n\
and exceeds this limit will cause an error."),
					   NULL, NULL, &setlist, &showlist);
  deprecate_cmd (vsize_limit.set, "set max-value-size");
#if GDB_SELF_TEST
  selftests::register_test ("ranges_contain", selftests::test_ranges_contain);
  selftests::register_test ("insert_into_bit_range_vector",
			    selftests::test_insert_into_bit_range_vector);
  selftests::register_test ("value_copy", selftests::test_value_copy);
#endif
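
  /* Illustrative usage (example, not from the sources): in a build with
     self tests enabled, the checks registered above can be run with

       (gdb) maintenance selftest ranges_contain

     which runs every registered test whose name matches the filter.  */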
  /* Destroy any values currently allocated in a final cleanup instead
     of leaving it to global destructors, because that may be too
     late.  For example, the destructors of xmethod values call into
     the Python runtime.  */
  add_final_cleanup ([] ()
    {
      all_values.clear ();
    });
}