1 /* -*- mode: C; c-basic-offset: 3; -*- */
3 /*--------------------------------------------------------------------*/
4 /*--- The address space manager: segment initialisation and ---*/
5 /*--- tracking, stack operations ---*/
7 /*--- Implementation for Linux (and Darwin!) aspacemgr-linux.c ---*/
8 /*--------------------------------------------------------------------*/
11 This file is part of Valgrind, a dynamic binary instrumentation framework.
14 Copyright (C) 2000-2017 Julian Seward
17 This program is free software; you can redistribute it and/or
18 modify it under the terms of the GNU General Public License as
19 published by the Free Software Foundation; either version 2 of the
20 License, or (at your option) any later version.
22 This program is distributed in the hope that it will be useful, but
23 WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
25 General Public License for more details.
27 You should have received a copy of the GNU General Public License
28 along with this program; if not, write to the Free Software
29 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
32 The GNU General Public License is contained in the file COPYING.
35 #if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
37 /* *************************************************************
38 DO NOT INCLUDE ANY OTHER FILES HERE.
39 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
40 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
41 ************************************************************* */
43 #include "priv_aspacemgr.h"
47 /* Note: many of the exported functions implemented below are
48 described more fully in comments in pub_core_aspacemgr.h.
52 /*-----------------------------------------------------------------*/
56 /*-----------------------------------------------------------------*/
60 The purpose of the address space manager (aspacem) is:
62 (1) to record the disposition of all parts of the process' address
   space at all times.
65 (2) to the extent that it can, influence layout in ways favourable
   to our purposes.
68 It is important to appreciate that whilst it can and does attempt
69 to influence layout, and usually succeeds, it isn't possible to
70 impose absolute control: in the end, the kernel is the final
71 arbiter, and can always bounce our requests.
75 The strategy is therefore as follows:
77 * Track ownership of mappings. Each one can belong either to
78 Valgrind or to the client.
80 * Try to place the client's fixed and hinted mappings at the
81 requested addresses. Fixed mappings are allowed anywhere except
82 in areas reserved by Valgrind; the client can trash its own
83 mappings if it wants. Hinted mappings are allowed providing they
84 fall entirely in free areas; if not, they will be placed by
85 aspacem in a free area.
87 * Anonymous mappings are allocated so as to keep Valgrind and
88 client areas widely separated when possible. If address space
89 runs low, then they may become intermingled: aspacem will attempt
90 to use all possible space. But under most circumstances lack of
91 address space is not a problem and so the areas will remain far apart.
94 Searches for client space start at aspacem_cStart and will wrap
95 around the end of the available space if needed. Searches for
96 Valgrind space start at aspacem_vStart and will also wrap around.
97 Because aspacem_cStart is approximately at the start of the
98 available space and aspacem_vStart is approximately in the
99 middle, for the most part the client anonymous mappings will be
100 clustered towards the start of available space, and Valgrind ones towards the middle.
103 On Solaris, searches for client space start at (aspacem_vStart - 1)
104 and for Valgrind space start at (aspacem_maxAddr - 1) and go backwards.
105 This simulates what the kernel does - the brk limit grows from the bottom
106 and mmap'ed objects from the top. This is in contrast with Linux, where the
107 data segment and mmap'ed objects both grow from the bottom (leading to early
108 data segment exhaustion for tools which do not use m_replacemalloc). While Linux glibc
109 can cope with this problem by employing mmap, Solaris libc treats inability
110 to grow brk limit as a hard failure.
112 The available space is delimited by aspacem_minAddr and
113 aspacem_maxAddr. aspacem is flexible and can operate with these
114 at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
115 to some low-ish value at startup (64M) and aspacem_maxAddr is
116 derived from the stack pointer at system startup. This seems a
117 reliable way to establish the initial boundaries.
118 A command line option allows the value of aspacem_minAddr to be changed,
119 so as to allow memory-hungry applications to use the lowest possible address.
122 64-bit Linux is similar except for the important detail that the
123 upper boundary is set to 64G. The reason is so that all
124 anonymous mappings (basically all client data areas) are kept
125 below 64G, since that is the maximum range that memcheck can
126 track shadow memory using a fast 2-level sparse array. It can go
127 beyond that but runs much more slowly. The 64G limit is
128 arbitrary and is trivially changed. So, with the current
129 settings, programs on 64-bit Linux will appear to run out of
130 address space and presumably fail at the 64G limit. Given the
131 considerable space overhead of Memcheck, that means you should be
132 able to memcheckify programs that use up to about 32G natively.
134 Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
135 anonymous mappings. The client can still do fixed and hinted maps
136 at any addresses provided they do not overlap Valgrind's segments.
137 This makes Valgrind able to load prelinked .so's at their requested
138 addresses on 64-bit platforms, even if they are very high (eg,
141 At startup, aspacem establishes the usable limits, and advises
142 m_main to place the client stack at the top of the range, which on
143 a 32-bit machine will be just below the real initial stack. One
144 effect of this is that self-hosting sort-of works, because an inner
145 valgrind will then place its client's stack just below its own initial stack.
148 The segment array and segment kinds
149 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
150 The central data structure is the segment array (segments[0
151 .. nsegments_used-1]). This covers the entire address space in
152 order, giving account of every byte of it. Free spaces are
153 represented explicitly as this makes many operations simpler.
154 Mergeable adjacent segments are aggressively merged so as to create
155 a "normalised" representation (preen_nsegments).
157 There are 7 (mutually-exclusive) segment kinds, the meaning of
    which is as follows:

160 SkFree: a free space, which may be allocated either to Valgrind (V)
    or to the client (C).
163 SkAnonC: an anonymous mapping belonging to C. For these, aspacem
164 tracks a boolean indicating whether or not it is part of the
165 client's heap area (can't remember why).
167 SkFileC: a file mapping belonging to C.
169 SkShmC: a shared memory segment belonging to C.
171 SkAnonV: an anonymous mapping belonging to V. These cover all V's
172 dynamic memory needs, including non-client malloc/free areas,
173 shadow memory, and the translation cache.
175 SkFileV: a file mapping belonging to V. As far as I know these are
176 only created transiently for the purposes of reading debug info.
178 SkResvn: a reservation segment.
180 These are mostly straightforward. Reservation segments have some subtlety:
183 A reservation segment is unmapped from the kernel's point of view,
184 but is an area in which aspacem will not create anonymous maps
185 (either Vs or Cs). The idea is that we will try to keep it clear
186 when the choice to do so is ours. Reservation segments are
187 'invisible' from the client's point of view: it may choose to park
188 a fixed mapping in the middle of one, and that's just tough -- we
189 can't do anything about that. From the client's perspective
190 reservations are semantically equivalent to (although
191 distinguishable from, if it makes enquiries) free areas.
193 Reservations are a primitive mechanism provided for whatever
194 purposes the rest of the system wants. Currently they are used to
195 reserve the expansion space into which a growdown stack is
196 expanded, and into which the data segment is extended. Note,
197 though, those uses are entirely external to this module, which only
198 supplies the primitives.
200 Reservations may be shrunk in order that an adjoining anonymous
201 mapping may be extended. This makes dataseg/stack expansion work.
202 A reservation may not be shrunk below one page.
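   As an illustration of how reservations are used (a sketch only -- the
   real setup lives in aspacem's callers, and the exact VG_(am_*)
   signatures below are assumptions to be checked against
   pub_core_aspacemgr.h), a growdown client stack with expansion headroom
   can be arranged as a reservation abutting the mapped stack from below:

      // [resvn_start .. resvn_start+resvn_size-1]    SkResvn, SmUpper
      // [resvn_start+resvn_size .. +stack_size-1]    SkAnonC (the stack)
      Addr stack_start = resvn_start + resvn_size;
      Bool ok = VG_(am_create_reservation)( resvn_start, resvn_size,
                                            SmUpper, stack_size );
      if (ok) {
         SysRes sres = VG_(am_mmap_anon_fixed_client)
                          ( stack_start, stack_size,
                            VKI_PROT_READ | VKI_PROT_WRITE );
         ok = !sr_isError(sres);
      }

   Later, when the client's stack pointer moves below stack_start, the
   stack segment is grown downwards by shrinking the reservation at its
   upper end (see VG_(am_extend_into_adjacent_reservation_client)).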
204 The advise/notify concept
205 ~~~~~~~~~~~~~~~~~~~~~~~~~
206 All mmap-related calls must be routed via aspacem. Calling
207 sys_mmap directly from the rest of the system is very dangerous
208 because aspacem's data structures will become out of date.
210 The fundamental mode of operation of aspacem is to support client
211 mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
213 * m_syswrap intercepts the mmap call. It examines the parameters
214 and identifies the requested placement constraints. There are
215 three possibilities: no constraint (MAny), hinted (MHint, "I
216 prefer X but will accept anything"), and fixed (MFixed, "X or nothing at all").
219 * This request is passed to VG_(am_get_advisory). This decides on
220 a placement as described in detail in Strategy above. It may
221 also indicate that the map should fail, because it would trash
222 one of Valgrind's areas, which would probably kill the system.
224 * Control returns to the wrapper. If VG_(am_get_advisory) has
225 declared that the map should fail, then it must be made to do so.
226 Usually, though, the request is considered acceptable, in which
227 case an "advised" address is supplied. The advised address
228 replaces the original address supplied by the client.
231 Note at this point that although aspacem has been asked for
232 advice on where to place the mapping, no commitment has yet been
233 made by either it or the kernel.
235 * The adjusted request is handed off to the kernel.
237 * The kernel's result is examined. If the map succeeded, aspacem
238 is told of the outcome (VG_(am_notify_client_mmap)), so it can
239 update its records accordingly.
241 This then is the central advise-notify idiom for handling client
242 mmap/munmap/mprotect/shmat:
244 * ask aspacem for an advised placement (or a veto)
246 * if not vetoed, hand request to kernel, using the advised placement
248 * examine result, and if successful, notify aspacem of the result.
250 There are also many convenience functions, eg
251 VG_(am_mmap_anon_fixed_client), which do both phases entirely within aspacem.
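   As a worked example of the idiom for a client mmap (a sketch only, not
   the real wrapper code: do_mmap_sketch is a hypothetical stand-in for the
   raw mmap syscall, start/len/prot/flags/fd/offset are the client's mmap
   arguments, and the VG_(am_*) signatures are assumptions to be checked
   against pub_core_aspacemgr.h):

      Bool ok;
      Addr advised = VG_(am_get_advisory_client_simple)( start, len, &ok );
      if (!ok)
         return VG_(mk_SysRes_Error)( VKI_ENOMEM );  // vetoed: make it fail
      // hand the adjusted request to the kernel, fixed at 'advised'
      SysRes sres = do_mmap_sketch( advised, len, prot,
                                    flags | VKI_MAP_FIXED, fd, offset );
      if (!sr_isError(sres))
         // success: notify aspacem so its records stay in sync
         VG_(am_notify_client_mmap)( sr_Res(sres), len, prot, flags,
                                     fd, offset );
      return sres;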
254 To debug all this, a sync-checker is provided. It reads
255 /proc/self/maps, compares what it sees with aspacem's records, and
256 complains if there is a difference. --sanity-level=3 runs it before
257 and after each syscall, which is a powerful, if slow, way of finding
258 buggy syscall wrappers.
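   For example, a run such as

      valgrind --tool=memcheck --sanity-level=3 ./myprog

   (any tool will do; memcheck is merely an example) causes the
   sync-checker, VG_(am_do_sync_check) below, to be run before and after
   each syscall, as just described.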
262 Up to and including Valgrind 2.4.1, x86 segmentation was used to
263 enforce separation of V and C, so that wild writes by C could not
264 trash V. This got called "pointercheck". Unfortunately, the new
265 more flexible memory layout, plus the need to be portable across
266 different architectures, means doing this in hardware is no longer
267 viable, and doing it in software is expensive. So at the moment we don't do it at all.
272 /*-----------------------------------------------------------------*/
274 /*--- The Address Space Manager's state. ---*/
276 /*-----------------------------------------------------------------*/
278 /* ------ start of STATE for the address-space manager ------ */
280 /* Max number of segments we can track. On Android, virtual address
281 space is limited, so keep a low limit -- 5000 x sizeof(NSegment) is
283 #if defined(VGPV_arm_linux_android) \
284 || defined(VGPV_x86_linux_android) \
285 || defined(VGPV_mips32_linux_android) \
286 || defined(VGPV_arm64_linux_android)
287 # define VG_N_SEGMENTS 5000
288 #else
289 # define VG_N_SEGMENTS 30000
290 #endif
292 /* Array [0 .. nsegments_used-1] of all mappings. */
293 /* Sorted by .addr field. */
294 /* I: len may not be zero. */
295 /* I: overlapping segments are not allowed. */
296 /* I: the segments cover the entire address space precisely. */
297 /* Each segment can optionally hold an index into the filename table. */
299 static NSegment nsegments[VG_N_SEGMENTS];
300 static Int      nsegments_used = 0;
302 #define Addr_MIN ((Addr)0)
303 #define Addr_MAX ((Addr)(-1ULL))
308 Addr VG_(clo_aspacem_minAddr)
309 #if defined(VGO_linux)
310    = (Addr) 0x04000000; // 64M
311 #elif defined(VGO_darwin)
312 # if VG_WORDSIZE == 4
315    = (Addr) 0x100000000; // 4GB page zero
317 #elif defined(VGO_solaris)
318    = (Addr) 0x00100000; // 1MB
323 // The smallest address that aspacem will try to allocate
324 static Addr aspacem_minAddr = 0;
326 // The largest address that aspacem will try to allocate
327 static Addr aspacem_maxAddr = 0;
329 // Where aspacem will start looking for client space
330 static Addr aspacem_cStart = 0;
332 // Where aspacem will start looking for Valgrind space
333 static Addr aspacem_vStart = 0;
336 #define AM_SANITY_CHECK \
338 if (VG_(clo_sanity_level) >= 3) \
339 aspacem_assert(VG_(am_do_sync_check) \
340 (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
343 /* ------ end of STATE for the address-space manager ------ */
345 /* ------ Forwards decls ------ */
347 static Int find_nsegment_idx ( Addr a );
349 static void parse_procselfmaps (
350    void (*record_mapping)( Addr addr, SizeT len, UInt prot,
351                            ULong dev, ULong ino, Off64T offset,
352                            const HChar* filename ),
353    void (*record_gap)( Addr addr, SizeT len )
    );
356 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
357 /* Not that I have anything against the commpage per se. It's just
358 that it's not listed in /proc/self/maps, which is a royal PITA --
359 we have to fake it up, in parse_procselfmaps.
361 But note also bug 254556 comment #2: this is now fixed in newer
362 kernels -- it is listed as a "[vectors]" entry. Presumably the
363 fake entry made here duplicates the [vectors] entry, and so, if at
364 some point in the future, we can stop supporting buggy kernels,
365 then this kludge can be removed entirely, since the procmap parser
366 below will read that entry in the normal way. */
367 #if defined(VGP_arm_linux)
368 # define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
369 # define ARM_LINUX_FAKE_COMMPAGE_END1 0xFFFF1000
374 /*-----------------------------------------------------------------*/
376 /*--- Displaying the segment array. ---*/
378 /*-----------------------------------------------------------------*/
380 static const HChar* show_SegKind ( SegKind sk )
383       case SkFree:  return "    ";
384       case SkAnonC: return "anon";
385       case SkAnonV: return "ANON";
386       case SkFileC: return "file";
387       case SkFileV: return "FILE";
388       case SkShmC:  return "shm ";
389       case SkResvn: return "RSVN";
390       default:      return "????";
394 static const HChar* show_ShrinkMode ( ShrinkMode sm )
397       case SmLower: return "SmLower";
398       case SmUpper: return "SmUpper";
399       case SmFixed: return "SmFixed";
400       default:      return "Sm?????";
404 static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
407    ULong len = ((ULong)end) - ((ULong)start) + 1;
409    if (len < 10*1000*1000ULL) {
412    else if (len < 999999ULL * (1ULL<<20)) {
416    else if (len < 999999ULL * (1ULL<<30)) {
420    else if (len < 999999ULL * (1ULL<<40)) {
428    ML_(am_sprintf)(buf, fmt, len);
431 /* Show full details of an NSegment */
433 static void show_nsegment_full ( Int logLevel, Int segNo, const NSegment* seg )
436    const HChar* name = ML_(am_get_segname)( seg->fnIdx );
441    show_len_concisely(len_buf, seg->start, seg->end);
445       "%3d: %s %010lx-%010lx %s %c%c%c%c%c %s "
446       "d=0x%03llx i=%-7llu o=%-7lld (%d,%d) %s\n",
447       segNo, show_SegKind(seg->kind),
448       seg->start, seg->end, len_buf,
449       seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
450       seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
451       seg->isCH ? 'H' : '-',
452       show_ShrinkMode(seg->smode),
453       seg->dev, seg->ino, seg->offset,
454       ML_(am_segname_get_seqnr)(seg->fnIdx), seg->fnIdx,
460 /* Show an NSegment in a user-friendly-ish way. */
462 static void show_nsegment ( Int logLevel, Int segNo, const NSegment* seg )
465    show_len_concisely(len_buf, seg->start, seg->end);
472          "%3d: %s %010lx-%010lx %s\n",
473          segNo, show_SegKind(seg->kind),
474          seg->start, seg->end, len_buf
478       case SkAnonC: case SkAnonV: case SkShmC:
481          "%3d: %s %010lx-%010lx %s %c%c%c%c%c\n",
482          segNo, show_SegKind(seg->kind),
483          seg->start, seg->end, len_buf,
484          seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
485          seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
486          seg->isCH ? 'H' : '-'
490       case SkFileC: case SkFileV:
493          "%3d: %s %010lx-%010lx %s %c%c%c%c%c d=0x%03llx "
494          "i=%-7llu o=%-7lld (%d,%d)\n",
495          segNo, show_SegKind(seg->kind),
496          seg->start, seg->end, len_buf,
497          seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
498          seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
499          seg->isCH ? 'H' : '-',
500          seg->dev, seg->ino, seg->offset,
501          ML_(am_segname_get_seqnr)(seg->fnIdx), seg->fnIdx
508          "%3d: %s %010lx-%010lx %s %c%c%c%c%c %s\n",
509          segNo, show_SegKind(seg->kind),
510          seg->start, seg->end, len_buf,
511          seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
512          seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
513          seg->isCH ? 'H' : '-',
514          show_ShrinkMode(seg->smode)
521          "%3d: ???? UNKNOWN SEGMENT KIND\n",
528 /* Print out the segment array (debugging only!). */
529 void VG_(am_show_nsegments) ( Int logLevel, const HChar* who )
532    VG_(debugLog)(logLevel, "aspacem",
533                  "<<< SHOW_SEGMENTS: %s (%d segments)\n",
534                  who, nsegments_used);
535    ML_(am_show_segnames)( logLevel, who);
536    for (i = 0; i < nsegments_used; i++)
537      show_nsegment( logLevel, i, &nsegments[i] );
538    VG_(debugLog)(logLevel, "aspacem",
543 /* Get the filename corresponding to this segment, if known and if it
544    has one. */
545 const HChar* VG_(am_get_filename)( NSegment const * seg )
548    return ML_(am_get_segname)( seg->fnIdx );
551 /* Collect up the start addresses of segments whose kind matches one of
552 the kinds specified in kind_mask.
553 The interface is a bit strange in order to avoid potential
554 segment-creation races caused by dynamic allocation of the result buffer.
557 The function first computes how many entries in the result
558 buffer *starts will be needed. If this number <= nStarts,
559 they are placed in starts[0..], and the number is returned.
560 If nStarts is not large enough, nothing is written to
561 starts[0..], and the negation of the size is returned.
563 Correct use of this function may mean calling it multiple times in
564 order to establish a suitably-sized buffer; a usage sketch follows the function body below. */
566 Int VG_(am_get_segment_starts)( UInt kind_mask, Addr* starts, Int nStarts )
570    /* don't pass dumbass arguments */
571    aspacem_assert(nStarts > 0);
574    for (i = 0; i < nsegments_used; i++) {
575       if ((nsegments[i].kind & kind_mask) != 0)
579    if (nSegs > nStarts) {
580       /* The buffer isn't big enough.  Tell the caller how big it needs
585    /* There's enough space.  So write into the result buffer. */
586    aspacem_assert(nSegs <= nStarts);
589    for (i = 0; i < nsegments_used; i++) {
590       if ((nsegments[i].kind & kind_mask) != 0)
591          starts[j++] = nsegments[i].start;
594    aspacem_assert(j == nSegs);  /* this should not fail */
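/* Usage sketch for VG_(am_get_segment_starts), as promised in the comment
   above.  Illustrative only and not part of this module: the caller shown
   is hypothetical and assumes VG_(malloc)/VG_(free) (pub_core_mallocfree.h)
   are available in the calling context, which they are not inside aspacem
   itself. */
#if 0
static Addr* get_client_segment_starts_sketch ( /*OUT*/Int* nStartsOut )
{
   UInt kind_mask = SkFileC | SkAnonC | SkShmC;
   Int  nStarts   = 16;   /* initial guess at the buffer size */
   while (True) {
      Addr* starts = VG_(malloc)( "sketch.gcss.1", nStarts * sizeof(Addr) );
      Int   r      = VG_(am_get_segment_starts)( kind_mask, starts, nStarts );
      if (r >= 0) {
         *nStartsOut = r;    /* starts[0 .. r-1] are valid */
         return starts;
      }
      /* buffer too small; -r is the number of entries actually needed */
      VG_(free)(starts);
      nStarts = -r;
   }
}
#endif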
599 /*-----------------------------------------------------------------*/
601 /*--- Sanity checking and preening of the segment array. ---*/
603 /*-----------------------------------------------------------------*/
605 /* Check representational invariants for NSegments. */
607 static Bool
sane_NSegment ( const NSegment
* s
)
609 if (s
== NULL
) return False
;
611 /* No zero sized segments and no wraparounds. */
612 if (s
->start
> s
->end
) return False
;
614 /* require page alignment */
615 if (!VG_IS_PAGE_ALIGNED(s
->start
)) return False
;
616 if (!VG_IS_PAGE_ALIGNED(s
->end
+1)) return False
;
623 && s
->dev
== 0 && s
->ino
== 0 && s
->offset
== 0 && s
->fnIdx
== -1
624 && !s
->hasR
&& !s
->hasW
&& !s
->hasX
&& !s
->hasT
627 case SkAnonC
: case SkAnonV
: case SkShmC
:
630 && s
->dev
== 0 && s
->ino
== 0 && s
->offset
== 0 && s
->fnIdx
== -1
631 && (s
->kind
==SkAnonC
? True
: !s
->isCH
);
633 case SkFileC
: case SkFileV
:
636 && ML_(am_sane_segname
)(s
->fnIdx
)
641 s
->dev
== 0 && s
->ino
== 0 && s
->offset
== 0 && s
->fnIdx
== -1
642 && !s
->hasR
&& !s
->hasW
&& !s
->hasX
&& !s
->hasT
651 /* Try merging s2 into s1, if possible. If successful, s1 is
652 modified, and True is returned. Otherwise s1 is unchanged and
653 False is returned. */
655 static Bool
maybe_merge_nsegments ( NSegment
* s1
, const NSegment
* s2
)
657 if (s1
->kind
!= s2
->kind
)
660 if (s1
->end
+1 != s2
->start
)
663 /* reject cases which would cause wraparound */
664 if (s1
->start
> s2
->end
)
673 case SkAnonC
: case SkAnonV
:
674 if (s1
->hasR
== s2
->hasR
&& s1
->hasW
== s2
->hasW
675 && s1
->hasX
== s2
->hasX
&& s1
->isCH
== s2
->isCH
) {
677 s1
->hasT
|= s2
->hasT
;
682 case SkFileC
: case SkFileV
:
683 if (s1
->hasR
== s2
->hasR
684 && s1
->hasW
== s2
->hasW
&& s1
->hasX
== s2
->hasX
685 && s1
->dev
== s2
->dev
&& s1
->ino
== s2
->ino
686 && s2
->offset
== s1
->offset
687 + ((ULong
)s2
->start
) - ((ULong
)s1
->start
) ) {
689 s1
->hasT
|= s2
->hasT
;
690 ML_(am_dec_refcount
)(s1
->fnIdx
);
699 if (s1
->smode
== SmFixed
&& s2
->smode
== SmFixed
) {
713 /* Sanity-check and canonicalise the segment array (merge mergable
714 segments). Returns True if any segments were merged. */
716 static Bool
preen_nsegments ( void )
718 Int i
, r
, w
, nsegments_used_old
= nsegments_used
;
720 /* Pass 1: check the segment array covers the entire address space
721 exactly once, and also that each segment is sane. */
722 aspacem_assert(nsegments_used
> 0);
723 aspacem_assert(nsegments
[0].start
== Addr_MIN
);
724 aspacem_assert(nsegments
[nsegments_used
-1].end
== Addr_MAX
);
726 aspacem_assert(sane_NSegment(&nsegments
[0]));
727 for (i
= 1; i
< nsegments_used
; i
++) {
728 aspacem_assert(sane_NSegment(&nsegments
[i
]));
729 aspacem_assert(nsegments
[i
-1].end
+1 == nsegments
[i
].start
);
732 /* Pass 2: merge as much as possible, using
733 maybe_merge_segments. */
735 for (r
= 1; r
< nsegments_used
; r
++) {
736 if (maybe_merge_nsegments(&nsegments
[w
], &nsegments
[r
])) {
741 nsegments
[w
] = nsegments
[r
];
745 aspacem_assert(w
> 0 && w
<= nsegments_used
);
748 return nsegments_used
!= nsegments_used_old
;
752 /* Check the segment array corresponds with the kernel's view of
753 memory layout. sync_check_ok returns True if no anomalies were
754 found, else False. In the latter case the mismatching segments are
757 The general idea is: we get the kernel to show us all its segments
758 and also the gaps in between. For each such interval, try and find
759 a sequence of appropriate intervals in our segment array which
760 cover or more than cover the kernel's interval, and which all have
761 suitable kinds/permissions etc.
763 Although any specific kernel interval is not matched exactly to a
764 valgrind interval or sequence thereof, eventually any disagreement
765 on mapping boundaries will be detected. This is because, if for
766 example valgrind's intervals cover a greater range than the current
767 kernel interval, it must be the case that a neighbouring free-space
768 interval belonging to valgrind cannot cover the neighbouring
769 free-space interval belonging to the kernel. So the disagreement
772 In other words, we examine each kernel interval in turn, and check
773 we do not disagree over the range of that interval. Because all of
774 the address space is examined, any disagreements must eventually be
778 static Bool sync_check_ok
= False
;
780 static void sync_check_mapping_callback ( Addr addr
, SizeT len
, UInt prot
,
781 ULong dev
, ULong ino
, Off64T offset
,
782 const HChar
* filename
)
785 Bool sloppyXcheck
, sloppyRcheck
;
787 /* If a problem has already been detected, don't continue comparing
788 segments, so as to avoid flooding the output with error
790 #if !defined(VGO_darwin)
798 /* The kernel should not give us wraparounds. */
799 aspacem_assert(addr
<= addr
+ len
- 1);
801 iLo
= find_nsegment_idx( addr
);
802 iHi
= find_nsegment_idx( addr
+ len
- 1 );
804 /* These 5 should be guaranteed by find_nsegment_idx. */
805 aspacem_assert(0 <= iLo
&& iLo
< nsegments_used
);
806 aspacem_assert(0 <= iHi
&& iHi
< nsegments_used
);
807 aspacem_assert(iLo
<= iHi
);
808 aspacem_assert(nsegments
[iLo
].start
<= addr
);
809 aspacem_assert(nsegments
[iHi
].end
>= addr
+ len
- 1 );
811 /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
812 most recent NX-bit enabled CPUs) and so recent kernels attempt
813 to provide execute protection by placing all executable mappings
814 low down in the address space and then reducing the size of the
815 code segment to prevent code at higher addresses being executed.
817 These kernels report which mappings are really executable in
818 the /proc/self/maps output rather than mirroring what was asked
819 for when each mapping was created. In order to cope with this we
820 have a sloppyXcheck mode which we enable on x86 and s390 - in this
821 mode we allow the kernel to report execute permission when we weren't
822 expecting it but not vice versa. */
823 # if defined(VGA_x86) || defined (VGA_s390x)
826 sloppyXcheck
= False
;
829 /* Some kernels on s390 provide 'r' permission even when it was not
830 explicitly requested. It seems that 'x' permission implies 'r'.
831 This behaviour also occurs on OS X. */
832 # if defined(VGA_s390x) || defined(VGO_darwin)
835 sloppyRcheck
= False
;
838 /* NSegments iLo .. iHi inclusive should agree with the presented
840 for (i
= iLo
; i
<= iHi
; i
++) {
842 Bool same
, cmp_offsets
, cmp_devino
;
845 /* compare the kernel's offering against ours. */
846 same
= nsegments
[i
].kind
== SkAnonC
847 || nsegments
[i
].kind
== SkAnonV
848 || nsegments
[i
].kind
== SkFileC
849 || nsegments
[i
].kind
== SkFileV
850 || nsegments
[i
].kind
== SkShmC
;
853 if (nsegments
[i
].hasR
) seg_prot
|= VKI_PROT_READ
;
854 if (nsegments
[i
].hasW
) seg_prot
|= VKI_PROT_WRITE
;
855 if (nsegments
[i
].hasX
) seg_prot
|= VKI_PROT_EXEC
;
858 = nsegments
[i
].kind
== SkFileC
|| nsegments
[i
].kind
== SkFileV
;
861 = nsegments
[i
].dev
!= 0 || nsegments
[i
].ino
!= 0;
863 /* Consider other reasons to not compare dev/inode */
864 #if defined(VGO_linux)
865 /* bproc does some godawful hack on /dev/zero at process
866 migration, which changes the name of it, and its dev & ino */
867 if (filename
&& 0==VG_(strcmp
)(filename
, "/dev/zero (deleted)"))
870 /* hack apparently needed on MontaVista Linux */
871 if (filename
&& VG_(strstr
)(filename
, "/.lib-ro/"))
875 #if defined(VGO_darwin)
876 // GrP fixme kernel info doesn't have dev/inode
879 // GrP fixme V and kernel don't agree on offsets
883 /* If we are doing sloppy execute permission checks then we
884 allow segment to have X permission when we weren't expecting
885 it (but not vice versa) so if the kernel reported execute
886 permission then pretend that this segment has it regardless
887 of what we were expecting. */
888 if (sloppyXcheck
&& (prot
& VKI_PROT_EXEC
) != 0) {
889 seg_prot
|= VKI_PROT_EXEC
;
892 if (sloppyRcheck
&& (prot
& (VKI_PROT_EXEC
| VKI_PROT_READ
)) ==
893 (VKI_PROT_EXEC
| VKI_PROT_READ
)) {
894 seg_prot
|= VKI_PROT_READ
;
900 ? (nsegments
[i
].dev
== dev
&& nsegments
[i
].ino
== ino
)
903 ? nsegments
[i
].start
-nsegments
[i
].offset
== addr
-offset
907 Addr end
= start
+ len
- 1;
909 show_len_concisely(len_buf
, start
, end
);
911 sync_check_ok
= False
;
915 "segment mismatch: V's seg 1st, kernel's 2nd:\n");
916 show_nsegment_full( 0, i
, &nsegments
[i
] );
917 VG_(debugLog
)(0,"aspacem",
918 "...: .... %010lx-%010lx %s %c%c%c.. ....... "
919 "d=0x%03llx i=%-7llu o=%-7lld (.) m=. %s\n",
921 prot
& VKI_PROT_READ
? 'r' : '-',
922 prot
& VKI_PROT_WRITE
? 'w' : '-',
923 prot
& VKI_PROT_EXEC
? 'x' : '-',
924 dev
, ino
, offset
, filename
? filename
: "(none)" );
930 /* Looks harmless. Keep going. */
934 static void sync_check_gap_callback ( Addr addr
, SizeT len
)
938 /* If a problem has already been detected, don't continue comparing
939 segments, so as to avoid flooding the output with error
941 #if !defined(VGO_darwin)
949 /* The kernel should not give us wraparounds. */
950 aspacem_assert(addr
<= addr
+ len
- 1);
952 iLo
= find_nsegment_idx( addr
);
953 iHi
= find_nsegment_idx( addr
+ len
- 1 );
955 /* These 5 should be guaranteed by find_nsegment_idx. */
956 aspacem_assert(0 <= iLo
&& iLo
< nsegments_used
);
957 aspacem_assert(0 <= iHi
&& iHi
< nsegments_used
);
958 aspacem_assert(iLo
<= iHi
);
959 aspacem_assert(nsegments
[iLo
].start
<= addr
);
960 aspacem_assert(nsegments
[iHi
].end
>= addr
+ len
- 1 );
962 /* NSegments iLo .. iHi inclusive should agree with the presented
964 for (i
= iLo
; i
<= iHi
; i
++) {
968 /* compare the kernel's offering against ours. */
969 same
= nsegments
[i
].kind
== SkFree
970 || nsegments
[i
].kind
== SkResvn
;
974 Addr end
= start
+ len
- 1;
976 show_len_concisely(len_buf
, start
, end
);
978 sync_check_ok
= False
;
982 "segment mismatch: V's gap 1st, kernel's 2nd:\n");
983 show_nsegment_full( 0, i
, &nsegments
[i
] );
984 VG_(debugLog
)(0,"aspacem",
985 " : .... %010lx-%010lx %s\n",
986 start
, end
, len_buf
);
991 /* Looks harmless. Keep going. */
996 /* Sanity check: check that Valgrind and the kernel agree on the
997 address space layout. Prints offending segments and call point if
998 a discrepancy is detected, but does not abort the system. Returned
999 Bool is False if a discrepancy was found. */
1001 Bool
VG_(am_do_sync_check
) ( const HChar
* fn
,
1002 const HChar
* file
, Int line
)
1004 sync_check_ok
= True
;
1006 VG_(debugLog
)(0,"aspacem", "do_sync_check %s:%d\n", file
,line
);
1007 parse_procselfmaps( sync_check_mapping_callback
,
1008 sync_check_gap_callback
);
1009 if (!sync_check_ok
) {
1010 VG_(debugLog
)(0,"aspacem",
1011 "sync check at %s:%d (%s): FAILED\n",
1013 VG_(debugLog
)(0,"aspacem", "\n");
1017 HChar buf
[100]; // large enough
1018 VG_(am_show_nsegments
)(0,"post syncheck failure");
1019 VG_(sprintf
)(buf
, "/bin/cat /proc/%d/maps", VG_(getpid
)());
1025 return sync_check_ok
;
1028 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
1029 void ML_(am_do_sanity_check
)( void )
1035 /*-----------------------------------------------------------------*/
1037 /*--- Low level access / modification of the segment array. ---*/
1039 /*-----------------------------------------------------------------*/
1041 /* Binary search the interval array for a given address. Since the
1042 array covers the entire address space the search cannot fail. The
1043 _WRK function does the real work. Its caller (just below) caches
1044 the results thereof, to save time. With N_CACHE of 63 we get a hit
1045 rate exceeding 90% when running OpenOffice.
1047 Re ">> 12", it doesn't matter that the page size of some targets
1048 might be different from 12. Really "(a >> 12) % N_CACHE" is merely
1049 a hash function, and the actual cache entry is always validated
1050 correctly against the selected cache entry before use.
1052 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
1053 __attribute__((noinline))
1054 static Int find_nsegment_idx_WRK ( Addr a )
1056    Addr a_mid_lo, a_mid_hi;
1059    hi = nsegments_used-1;
1061    /* current unsearched space is from lo to hi, inclusive. */
1063       /* Not found.  This can't happen. */
1064       ML_(am_barf)("find_nsegment_idx: not found");
1066       mid      = (lo + hi) / 2;
1067       a_mid_lo = nsegments[mid].start;
1068       a_mid_hi = nsegments[mid].end;
1070       if (a < a_mid_lo) { hi = mid-1; continue; }
1071       if (a > a_mid_hi) { lo = mid+1; continue; }
1072       aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1073       aspacem_assert(0 <= mid && mid < nsegments_used);
1078 inline static Int find_nsegment_idx ( Addr a )
1080    #  define N_CACHE 131 /*prime*/
1081    static Addr cache_pageno[N_CACHE];
1082    static Int  cache_segidx[N_CACHE];
1083    static Bool cache_inited = False;
1086    static UWord n_q = 0;
1087    static UWord n_m = 0;
1089    if (0 == (n_q & 0xFFFF))
1090       VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
1095    if (LIKELY(cache_inited)) {
1098       for (ix = 0; ix < N_CACHE; ix++) {
1099          cache_pageno[ix] = 0;
1100          cache_segidx[ix] = -1;
1102       cache_inited = True;
1105    ix = (a >> 12) % N_CACHE;
1107    if ((a >> 12) == cache_pageno[ix]
1108        && cache_segidx[ix] >= 0
1109        && cache_segidx[ix] < nsegments_used
1110        && nsegments[cache_segidx[ix]].start <= a
1111        && a <= nsegments[cache_segidx[ix]].end) {
1113       /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1114       return cache_segidx[ix];
1120    cache_segidx[ix] = find_nsegment_idx_WRK(a);
1121    cache_pageno[ix] = a >> 12;
1122    return cache_segidx[ix];
1127 /* Finds the segment containing 'a'.  Only returns non-SkFree segments. */
1128 NSegment const * VG_(am_find_nsegment) ( Addr a )
1130    Int i = find_nsegment_idx(a);
1131    aspacem_assert(i >= 0 && i < nsegments_used);
1132    aspacem_assert(nsegments[i].start <= a);
1133    aspacem_assert(a <= nsegments[i].end);
1134    if (nsegments[i].kind == SkFree)
1137    return &nsegments[i];
1140 /* Finds an anonymous segment containing 'a'. Returned pointer is read only. */
1141 NSegment
const *VG_(am_find_anon_segment
) ( Addr a
)
1143 Int i
= find_nsegment_idx(a
);
1144 aspacem_assert(i
>= 0 && i
< nsegments_used
);
1145 aspacem_assert(nsegments
[i
].start
<= a
);
1146 aspacem_assert(a
<= nsegments
[i
].end
);
1147 if (nsegments
[i
].kind
== SkAnonC
|| nsegments
[i
].kind
== SkAnonV
)
1148 return &nsegments
[i
];
1153 /* Map segment pointer to segment index. */
1154 static Int
segAddr_to_index ( const NSegment
* seg
)
1156 aspacem_assert(seg
>= &nsegments
[0] && seg
< &nsegments
[nsegments_used
]);
1158 return seg
- &nsegments
[0];
1162 /* Find the next segment along from 'here', if it is a non-SkFree segment. */
1163 NSegment
const * VG_(am_next_nsegment
) ( const NSegment
* here
, Bool fwds
)
1165 Int i
= segAddr_to_index(here
);
1169 if (i
>= nsegments_used
)
1176 if (nsegments
[i
].kind
== SkFree
)
1179 return &nsegments
[i
];
1183 /* Trivial fn: return the total amount of space in anonymous mappings,
1184 both for V and the client. Is used for printing stats in
1185 out-of-memory messages. */
1186 ULong
VG_(am_get_anonsize_total
)( void )
1190 for (i
= 0; i
< nsegments_used
; i
++) {
1191 if (nsegments
[i
].kind
== SkAnonC
|| nsegments
[i
].kind
== SkAnonV
) {
1192 total
+= (ULong
)nsegments
[i
].end
1193 - (ULong
)nsegments
[i
].start
+ 1ULL;
1200 /* Test if a piece of memory is addressable by client or by valgrind with at
1201 least the "prot" protection permissions by examining the underlying
1202 segments. The KINDS argument specifies the allowed segments ADDR may
1203 belong to in order to be considered "valid".
1206 Bool
is_valid_for( UInt kinds
, Addr start
, SizeT len
, UInt prot
)
1209 Bool needR
, needW
, needX
;
1212 return True
; /* somewhat dubious case */
1213 if (start
+ len
< start
)
1214 return False
; /* reject wraparounds */
1216 needR
= toBool(prot
& VKI_PROT_READ
);
1217 needW
= toBool(prot
& VKI_PROT_WRITE
);
1218 needX
= toBool(prot
& VKI_PROT_EXEC
);
1220 iLo
= find_nsegment_idx(start
);
1221 aspacem_assert(start
>= nsegments
[iLo
].start
);
1223 if (start
+len
-1 <= nsegments
[iLo
].end
) {
1224 /* This is a speedup hack which avoids calling find_nsegment_idx
1225 a second time when possible. It is always correct to just
1226 use the "else" clause below, but is_valid_for_client is
1227 called a lot by the leak checker, so avoiding pointless calls
1228 to find_nsegment_idx, which can be expensive, is helpful. */
1231 iHi
= find_nsegment_idx(start
+ len
- 1);
1234 for (i
= iLo
; i
<= iHi
; i
++) {
1235 if ( (nsegments
[i
].kind
& kinds
) != 0
1236 && (needR
? nsegments
[i
].hasR
: True
)
1237 && (needW
? nsegments
[i
].hasW
: True
)
1238 && (needX
? nsegments
[i
].hasX
: True
) ) {
1248 /* Test if a piece of memory is addressable by the client with at
1249 least the "prot" protection permissions by examining the underlying
1251 Bool
VG_(am_is_valid_for_client
)( Addr start
, SizeT len
,
1254 const UInt kinds
= SkFileC
| SkAnonC
| SkShmC
;
1256 return is_valid_for(kinds
, start
, len
, prot
);
1259 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
1260 be consider part of the client's addressable space. It also
1261 considers reservations to be allowable, since from the client's
1262 point of view they don't exist. */
1263 Bool
VG_(am_is_valid_for_client_or_free_or_resvn
)
1264 ( Addr start
, SizeT len
, UInt prot
)
1266 const UInt kinds
= SkFileC
| SkAnonC
| SkShmC
| SkFree
| SkResvn
;
1268 return is_valid_for(kinds
, start
, len
, prot
);
1271 /* Checks if a piece of memory consists of either free or reservation
1273 Bool
VG_(am_is_free_or_resvn
)( Addr start
, SizeT len
)
1275 const UInt kinds
= SkFree
| SkResvn
;
1277 return is_valid_for(kinds
, start
, len
, 0);
1281 Bool
VG_(am_is_valid_for_valgrind
) ( Addr start
, SizeT len
, UInt prot
)
1283 const UInt kinds
= SkFileV
| SkAnonV
;
1285 return is_valid_for(kinds
, start
, len
, prot
);
1289 /* Returns True if any part of the address range is marked as having
1290 translations made from it. This is used to determine when to
1291 discard code, so if in doubt return True. */
1293 static Bool
any_Ts_in_range ( Addr start
, SizeT len
)
1296 aspacem_assert(len
> 0);
1297 aspacem_assert(start
+ len
> start
);
1298 iLo
= find_nsegment_idx(start
);
1299 iHi
= find_nsegment_idx(start
+ len
- 1);
1300 for (i
= iLo
; i
<= iHi
; i
++) {
1301 if (nsegments
[i
].hasT
)
1308 /* Check whether ADDR looks like an address or address-to-be located in an
1309 extensible client stack segment. Return true if
1310 (1) ADDR is located in an already mapped stack segment, OR
1311 (2) ADDR is located in a reservation segment into which an abutting SkAnonC
1312 segment can be extended. */
1313 Bool
VG_(am_addr_is_in_extensible_client_stack
)( Addr addr
)
1315 const NSegment
*seg
= nsegments
+ find_nsegment_idx(addr
);
1317 switch (seg
->kind
) {
1326 if (seg
->smode
!= SmUpper
) return False
;
1327 /* If the abutting segment towards higher addresses is an SkAnonC
1328 segment, then ADDR is a future stack pointer. */
1329 const NSegment
*next
= VG_(am_next_nsegment
)(seg
, /*forward*/ True
);
1330 if (next
== NULL
|| next
->kind
!= SkAnonC
) return False
;
1332 /* OK; looks like a stack segment */
1337 /* If the abutting segment towards lower addresses is an SkResvn
1338 segment, then ADDR is a stack pointer into mapped memory. */
1339 const NSegment
*next
= VG_(am_next_nsegment
)(seg
, /*forward*/ False
);
1340 if (next
== NULL
|| next
->kind
!= SkResvn
|| next
->smode
!= SmUpper
)
1343 /* OK; looks like a stack segment */
1348 aspacem_assert(0); // should never happen
1352 /*-----------------------------------------------------------------*/
1354 /*--- Modifying the segment array, and constructing segments. ---*/
1356 /*-----------------------------------------------------------------*/
1358 /* Split the segment containing 'a' into two, so that 'a' is
1359 guaranteed to be the start of a new segment. If 'a' is already the
1360 start of a segment, do nothing. */
1362 static void split_nsegment_at ( Addr a
)
1366 aspacem_assert(a
> 0);
1367 aspacem_assert(VG_IS_PAGE_ALIGNED(a
));
1369 i
= find_nsegment_idx(a
);
1370 aspacem_assert(i
>= 0 && i
< nsegments_used
);
1372 if (nsegments
[i
].start
== a
)
1373 /* 'a' is already the start point of a segment, so nothing to be
1377 /* else we have to slide the segments upwards to make a hole */
1378 if (nsegments_used
>= VG_N_SEGMENTS
)
1379 ML_(am_barf_toolow
)("VG_N_SEGMENTS");
1380 for (j
= nsegments_used
-1; j
> i
; j
--)
1381 nsegments
[j
+1] = nsegments
[j
];
1384 nsegments
[i
+1] = nsegments
[i
];
1385 nsegments
[i
+1].start
= a
;
1386 nsegments
[i
].end
= a
-1;
1388 if (nsegments
[i
].kind
== SkFileV
|| nsegments
[i
].kind
== SkFileC
)
1389 nsegments
[i
+1].offset
1390 += ((ULong
)nsegments
[i
+1].start
) - ((ULong
)nsegments
[i
].start
);
1392 ML_(am_inc_refcount
)(nsegments
[i
].fnIdx
);
1394 aspacem_assert(sane_NSegment(&nsegments
[i
]));
1395 aspacem_assert(sane_NSegment(&nsegments
[i
+1]));
1399 /* Do the minimum amount of segment splitting necessary to ensure that
1400 sLo is the first address denoted by some segment and sHi is the
1401 highest address denoted by some other segment. Returns the indices
1402 of the lowest and highest segments in the range. */
1405 void split_nsegments_lo_and_hi ( Addr sLo
, Addr sHi
,
1409 aspacem_assert(sLo
< sHi
);
1410 aspacem_assert(VG_IS_PAGE_ALIGNED(sLo
));
1411 aspacem_assert(VG_IS_PAGE_ALIGNED(sHi
+1));
1414 split_nsegment_at(sLo
);
1416 split_nsegment_at(sHi
+1);
1418 *iLo
= find_nsegment_idx(sLo
);
1419 *iHi
= find_nsegment_idx(sHi
);
1420 aspacem_assert(0 <= *iLo
&& *iLo
< nsegments_used
);
1421 aspacem_assert(0 <= *iHi
&& *iHi
< nsegments_used
);
1422 aspacem_assert(*iLo
<= *iHi
);
1423 aspacem_assert(nsegments
[*iLo
].start
== sLo
);
1424 aspacem_assert(nsegments
[*iHi
].end
== sHi
);
1425 /* Not that I'm overly paranoid or anything, definitely not :-) */
1429 /* Add SEG to the collection, deleting/truncating any it overlaps.
1430 This deals with all the tricky cases of splitting up segments as
1433 static void add_segment ( const NSegment
* seg
)
1435 Int i
, iLo
, iHi
, delta
;
1436 Bool segment_is_sane
;
1438 Addr sStart
= seg
->start
;
1439 Addr sEnd
= seg
->end
;
1441 aspacem_assert(sStart
<= sEnd
);
1442 aspacem_assert(VG_IS_PAGE_ALIGNED(sStart
));
1443 aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd
+1));
1445 segment_is_sane
= sane_NSegment(seg
);
1446 if (!segment_is_sane
) show_nsegment_full(0,-1,seg
);
1447 aspacem_assert(segment_is_sane
);
1449 split_nsegments_lo_and_hi( sStart
, sEnd
, &iLo
, &iHi
);
1451 /* Increase the reference count of SEG's name. We need to do this
1452 *before* decreasing the reference count of the names of the replaced
1453 segments. Consider the case where the segment name of SEG and one of
1454 the replaced segments are the same. If the refcount of that name is 1,
1455 then decrementing first would put the slot for that name on the free
1456 list. Attempting to increment the refcount later would then fail
1457 because the slot is no longer allocated. */
1458 ML_(am_inc_refcount
)(seg
->fnIdx
);
1460 /* Now iLo .. iHi inclusive is the range of segment indices which
1461 seg will replace. If we're replacing more than one segment,
1462 slide those above the range down to fill the hole. Before doing
1463 that decrement the reference counters for the segments names of
1464 the replaced segments. */
1465 for (i
= iLo
; i
<= iHi
; ++i
)
1466 ML_(am_dec_refcount
)(nsegments
[i
].fnIdx
);
1468 aspacem_assert(delta
>= 0);
1470 for (i
= iLo
; i
< nsegments_used
-delta
; i
++)
1471 nsegments
[i
] = nsegments
[i
+delta
];
1472 nsegments_used
-= delta
;
1475 nsegments
[iLo
] = *seg
;
1477 (void)preen_nsegments();
1478 if (0) VG_(am_show_nsegments
)(0,"AFTER preen (add_segment)");
1482 /* Clear out an NSegment record. */
1484 static void init_nsegment ( /*OUT*/NSegment
* seg
)
1489 seg
->smode
= SmFixed
;
1495 seg
->hasR
= seg
->hasW
= seg
->hasX
= seg
->hasT
= seg
->isCH
= False
;
1498 /* Make an NSegment which holds a reservation. */
1500 static void init_resvn ( /*OUT*/NSegment
* seg
, Addr start
, Addr end
)
1502 aspacem_assert(start
< end
);
1503 aspacem_assert(VG_IS_PAGE_ALIGNED(start
));
1504 aspacem_assert(VG_IS_PAGE_ALIGNED(end
+1));
1506 seg
->kind
= SkResvn
;
1512 /*-----------------------------------------------------------------*/
1514 /*--- Startup, including reading /proc/self/maps. ---*/
1516 /*-----------------------------------------------------------------*/
1518 static void read_maps_callback ( Addr addr
, SizeT len
, UInt prot
,
1519 ULong dev
, ULong ino
, Off64T offset
,
1520 const HChar
* filename
)
1523 init_nsegment( &seg
);
1525 seg
.end
= addr
+len
-1;
1528 seg
.offset
= offset
;
1529 seg
.hasR
= toBool(prot
& VKI_PROT_READ
);
1530 seg
.hasW
= toBool(prot
& VKI_PROT_WRITE
);
1531 seg
.hasX
= toBool(prot
& VKI_PROT_EXEC
);
1534 /* A segment in the initial /proc/self/maps is considered a FileV
1535 segment if either it has a file name associated with it or both its
1536 device and inode numbers are != 0. See bug #124528. */
1538 if (filename
|| (dev
!= 0 && ino
!= 0))
1541 # if defined(VGO_darwin)
1542 // GrP fixme no dev/ino on darwin
1545 # endif // defined(VGO_darwin)
1547 # if defined(VGP_arm_linux)
1548 /* The standard handling of entries read from /proc/self/maps will
1549 cause the faked up commpage segment to have type SkAnonV, which
1550 is a problem because it contains code we want the client to
1551 execute, and so later m_translate will segfault the client when
1552 it tries to go in there. Hence change the ownership of it here
1553 to the client (SkAnonC). The least-worst kludge I could think
1555 if (addr
== ARM_LINUX_FAKE_COMMPAGE_START
1556 && addr
+ len
== ARM_LINUX_FAKE_COMMPAGE_END1
1557 && seg
.kind
== SkAnonV
)
1559 # endif // defined(VGP_arm_linux)
1562 seg
.fnIdx
= ML_(am_allocate_segname
)( filename
);
1564 if (0) show_nsegment( 2,0, &seg
);
1565 add_segment( &seg
);
1569 VG_(am_is_valid_for_aspacem_minAddr
)( Addr addr
, const HChar
**errmsg
)
1571 const Addr min
= VKI_PAGE_SIZE
;
1572 #if VG_WORDSIZE == 4
1573 const Addr max
= 0x40000000; // 1Gb
1575 const Addr max
= 0x200000000; // 8Gb
1577 Bool ok
= VG_IS_PAGE_ALIGNED(addr
) && addr
>= min
&& addr
<= max
;
1582 const HChar fmt
[] = "Must be a page aligned address between "
1584 static HChar buf
[sizeof fmt
+ 2 * 16]; // large enough
1585 ML_(am_sprintf
)(buf
, fmt
, min
, max
);
1592 /* See description in pub_core_aspacemgr.h */
1593 Addr
VG_(am_startup
) ( Addr sp_at_startup
)
1596 Addr suggested_clstack_end
;
1598 aspacem_assert(sizeof(Word
) == sizeof(void*));
1599 aspacem_assert(sizeof(Addr
) == sizeof(void*));
1600 aspacem_assert(sizeof(SizeT
) == sizeof(void*));
1601 aspacem_assert(sizeof(SSizeT
) == sizeof(void*));
1603 /* Initialise the string table for segment names. */
1604 ML_(am_segnames_init
)();
1606 /* Check that we can store the largest imaginable dev, ino and
1607 offset numbers in an NSegment. */
1608 aspacem_assert(sizeof(seg
.dev
) == 8);
1609 aspacem_assert(sizeof(seg
.ino
) == 8);
1610 aspacem_assert(sizeof(seg
.offset
) == 8);
1611 aspacem_assert(sizeof(seg
.mode
) == 4);
1613 /* Add a single interval covering the entire address space. */
1614 init_nsegment(&seg
);
1616 seg
.start
= Addr_MIN
;
1621 aspacem_minAddr
= VG_(clo_aspacem_minAddr
);
1623 // --- Darwin -------------------------------------------
1624 #if defined(VGO_darwin)
1626 # if VG_WORDSIZE == 4
1627 aspacem_maxAddr
= (Addr
) 0xffffffff;
1629 aspacem_cStart
= aspacem_minAddr
;
1630 aspacem_vStart
= 0xf0000000; // 0xc0000000..0xf0000000 available
1632 aspacem_maxAddr
= (Addr
) 0x7fffffffffff;
1634 aspacem_cStart
= aspacem_minAddr
;
1635 aspacem_vStart
= 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
1636 // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
1639 suggested_clstack_end
= -1; // ignored; Mach-O specifies its stack
1641 // --- Solaris ------------------------------------------
1642 #elif defined(VGO_solaris)
1643 # if VG_WORDSIZE == 4
1645 Intended address space partitioning:
1647 ,--------------------------------, 0x00000000
1649 |--------------------------------|
1650 | initial stack given to V by OS |
1651 |--------------------------------| 0x08000000
1653 |--------------------------------|
1656 |--------------------------------|
1658 |--------------------------------| 0x58000000
1660 |--------------------------------|
1663 |--------------------------------|
1664 | dynamic shared objects |
1665 '--------------------------------' 0xffffffff
1669 /* Anonymous pages need to fit under user limit (USERLIMIT32)
1670 which is 4KB + 16MB below the top of the 32-bit range. */
1671 # ifdef ENABLE_INNER
1672 aspacem_maxAddr
= (Addr
)0x4fffffff; // 1.25GB
1673 aspacem_vStart
= (Addr
)0x40000000; // 1GB
1675 aspacem_maxAddr
= (Addr
)0xfefff000 - 1; // 4GB - 16MB - 4KB
1676 aspacem_vStart
= (Addr
)0x50000000; // 1.25GB
1678 # elif VG_WORDSIZE == 8
1680 Intended address space partitioning:
1682 ,--------------------------------, 0x00000000_00000000
1684 |--------------------------------| 0x00000000_00400000
1686 |--------------------------------|
1689 |--------------------------------|
1691 |--------------------------------| 0x00000000_58000000
1693 |--------------------------------|
1695 |--------------------------------|
1696 | dynamic shared objects |
1697 |--------------------------------| 0x0000001f_ffffffff
1700 |--------------------------------|
1701 | initial stack given to V by OS |
1702 '--------------------------------' 0xffffffff_ffffffff
1706 /* Kernel likes to place objects at the end of the address space.
1707 However accessing memory beyond 128GB makes memcheck slow
1708 (see memcheck/mc_main.c, internal representation). Therefore:
1709 - mmapobj() syscall is emulated so that libraries are subject to
1710 Valgrind's aspacemgr control
1711 - Kernel shared pages (such as schedctl and hrt) are left as they are
1712 because kernel cannot be told where they should be put */
1713 # ifdef ENABLE_INNER
1714 aspacem_maxAddr
= (Addr
) 0x0000000fffffffff; // 64GB
1715 aspacem_vStart
= (Addr
) 0x0000000800000000; // 32GB
1717 aspacem_maxAddr
= (Addr
) 0x0000001fffffffff; // 128GB
1718 aspacem_vStart
= (Addr
) 0x0000001000000000; // 64GB
1721 # error "Unknown word size"
1724 aspacem_cStart
= aspacem_minAddr
;
1725 # ifdef ENABLE_INNER
1726 suggested_clstack_end
= (Addr
) 0x37ff0000 - 1; // 64kB below V's text
1728 suggested_clstack_end
= (Addr
) 0x57ff0000 - 1; // 64kB below V's text
1731 // --- Linux --------------------------------------------
1734 /* Establish address limits and block out unusable parts
1737 VG_(debugLog
)(2, "aspacem",
1738 " sp_at_startup = 0x%010lx (supplied)\n",
1741 # if VG_WORDSIZE == 8
1742 aspacem_maxAddr
= (Addr
)0x2000000000ULL
- 1; // 128G
1743 # ifdef ENABLE_INNER
1744 { Addr cse
= VG_PGROUNDDN( sp_at_startup
) - 1;
1745 if (aspacem_maxAddr
> cse
)
1746 aspacem_maxAddr
= cse
;
1750 aspacem_maxAddr
= VG_PGROUNDDN( sp_at_startup
) - 1;
1753 aspacem_cStart
= aspacem_minAddr
;
1754 aspacem_vStart
= VG_PGROUNDUP(aspacem_minAddr
1755 + (aspacem_maxAddr
- aspacem_minAddr
+ 1) / 2);
1756 # ifdef ENABLE_INNER
1757 aspacem_vStart
-= 0x20000000; // 512M
1760 suggested_clstack_end
= aspacem_maxAddr
- 16*1024*1024ULL
1764 // --- (end) --------------------------------------------
1766 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr
));
1767 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr
+ 1));
1768 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart
));
1769 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart
));
1770 aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_end
+ 1));
1772 VG_(debugLog
)(2, "aspacem",
1773 " minAddr = 0x%010lx (computed)\n",
1775 VG_(debugLog
)(2, "aspacem",
1776 " maxAddr = 0x%010lx (computed)\n",
1778 VG_(debugLog
)(2, "aspacem",
1779 " cStart = 0x%010lx (computed)\n",
1781 VG_(debugLog
)(2, "aspacem",
1782 " vStart = 0x%010lx (computed)\n",
1784 VG_(debugLog
)(2, "aspacem",
1785 "suggested_clstack_end = 0x%010lx (computed)\n",
1786 suggested_clstack_end
);
1788 if (aspacem_cStart
> Addr_MIN
) {
1789 init_resvn(&seg
, Addr_MIN
, aspacem_cStart
-1);
1792 if (aspacem_maxAddr
< Addr_MAX
) {
1793 init_resvn(&seg
, aspacem_maxAddr
+1, Addr_MAX
);
1797 /* Create a 1-page reservation at the notional initial
1798 client/valgrind boundary. This isn't strictly necessary, but
1799 because the advisor does first-fit and starts searches for
1800 valgrind allocations at the boundary, this is kind of necessary
1801 in order to get it to start allocating in the right place. */
1802 init_resvn(&seg
, aspacem_vStart
, aspacem_vStart
+ VKI_PAGE_SIZE
- 1);
1805 VG_(am_show_nsegments
)(2, "Initial layout");
1807 VG_(debugLog
)(2, "aspacem", "Reading /proc/self/maps\n");
1808 parse_procselfmaps( read_maps_callback
, NULL
);
1809 /* NB: on arm-linux, parse_procselfmaps automagically kludges up
1810 (iow, hands to its callbacks) a description of the ARM Commpage,
1811 since that's not listed in /proc/self/maps (kernel bug IMO). We
1812 have to fake up its existence in parse_procselfmaps and not
1813 merely add it here as an extra segment, because doing the latter
1814 causes sync checking to fail: we see we have an extra segment in
1815 the segments array, which isn't listed in /proc/self/maps.
1816 Hence we must make it appear that /proc/self/maps contained this
1817 segment all along. Sigh. */
1819 VG_(am_show_nsegments
)(2, "With contents of /proc/self/maps");
1822 return suggested_clstack_end
;
1826 /*-----------------------------------------------------------------*/
1828 /*--- The core query-notify mechanism. ---*/
1830 /*-----------------------------------------------------------------*/
/* Query aspacem to ask where a mapping should go. */

Addr VG_(am_get_advisory) ( const MapRequest*  req,
                            Bool  forClient,
                            /*OUT*/Bool* ok )
{
   /* This function implements allocation policy.

      The nature of the allocation request is determined by req, which
      specifies the start and length of the request and indicates
      whether the start address is mandatory, a hint, or irrelevant,
      and by forClient, which says whether this is for the client or
      for V.

      Return values: the request can be vetoed (*ok is set to False),
      in which case the caller should not attempt to proceed with
      making the mapping.  Otherwise, *ok is set to True, the caller
      may proceed, and the preferred address at which the mapping
      should happen is returned.

      Note that this is an advisory system only: the kernel can in
      fact do whatever it likes as far as placement goes, and we have
      no absolute control over it.

      Allocations will never be granted in a reserved area.

      The Default Policy is:

        Search the address space for two free intervals: one of them
        big enough to contain the request without regard to the
        specified address (viz, as if it was a floating request) and
        the other being able to contain the request at the specified
        address (viz, as if it were a fixed request).  Then, depending
        on the outcome of the search and the kind of request made,
        decide whether the request is allowable and what address to
        advise.

      The Default Policy is overridden by Policy Exception #1:

        If the request is for a fixed client map, we are prepared to
        grant it providing all areas inside the request are either
        free, reservations, or mappings belonging to the client.  In
        other words we are prepared to let the client trash its own
        mappings if it wants to.

      The Default Policy is overridden by Policy Exception #2:

        If the request is for a hinted client map, we are prepared to
        grant it providing all areas inside the request are either
        free or reservations.  In other words we are prepared to let
        the client have a hinted mapping anywhere it likes provided
        it does not trash either any of its own mappings or any of
        valgrind's mappings.
   */
   Int  i, j;
   Addr holeStart, holeEnd, holeLen;
   Bool fixed_not_required;

#if defined(VGO_solaris)
   Addr startPoint = forClient ? aspacem_vStart - 1 : aspacem_maxAddr - 1;
#else
   Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
#endif /* VGO_solaris */

   Addr reqStart = req->rkind==MFixed || req->rkind==MHint ? req->start : 0;
   Addr reqEnd   = reqStart + req->len - 1;
   Addr reqLen   = req->len;

   /* These hold indices for segments found during search, or -1 if not
      found. */
   Int fixedIdx = -1;
   Int floatIdx = -1;

   aspacem_assert(nsegments_used > 0);

   if (0) {
      VG_(am_show_nsegments)(0,"getAdvisory");
      VG_(debugLog)(0,"aspacem", "getAdvisory 0x%lx %lu\n",
                    req->start, req->len);
   }

   /* Reject zero-length requests */
   if (req->len == 0) {
      *ok = False;
      return 0;
   }

   /* Reject wraparounds */
   if (req->start + req->len < req->start) {
      *ok = False;
      return 0;
   }

   /* ------ Implement Policy Exception #1 ------ */

   if (forClient && req->rkind == MFixed) {
      Int  iLo   = find_nsegment_idx(reqStart);
      Int  iHi   = find_nsegment_idx(reqEnd);
      Bool allow = True;
      for (i = iLo; i <= iHi; i++) {
         if (nsegments[i].kind == SkFree
             || nsegments[i].kind == SkFileC
             || nsegments[i].kind == SkAnonC
             || nsegments[i].kind == SkShmC
             || nsegments[i].kind == SkResvn) {
            /* ok */
         } else {
            allow = False;
            break;
         }
      }
      if (allow) {
         /* Acceptable.  Granted. */
         *ok = True;
         return reqStart;
      }
      /* Not acceptable.  Fail. */
      *ok = False;
      return 0;
   }

   /* ------ Implement Policy Exception #2 ------ */

   if (forClient && req->rkind == MHint) {
      Int  iLo   = find_nsegment_idx(reqStart);
      Int  iHi   = find_nsegment_idx(reqEnd);
      Bool allow = True;
      for (i = iLo; i <= iHi; i++) {
         if (nsegments[i].kind == SkFree
             || nsegments[i].kind == SkResvn) {
            /* ok */
         } else {
            allow = False;
            break;
         }
      }
      if (allow) {
         /* Acceptable.  Granted. */
         *ok = True;
         return reqStart;
      }
      /* Not acceptable.  Fall through to the default policy. */
   }

   /* ------ Implement the Default Policy ------ */

   /* Don't waste time looking for a fixed match if not requested to. */
   fixed_not_required = req->rkind == MAny || req->rkind == MAlign;

   i = find_nsegment_idx(startPoint);

#if defined(VGO_solaris)
#  define UPDATE_INDEX(index)                      \
      (index)--;                                   \
      if ((index) <= 0)                            \
         (index) = nsegments_used - 1;
#  define ADVISE_ADDRESS(segment)                  \
      VG_PGROUNDDN((segment)->end + 1 - reqLen)
#  define ADVISE_ADDRESS_ALIGNED(segment)          \
      VG_ROUNDDN((segment)->end + 1 - reqLen, req->start)
#else
#  define UPDATE_INDEX(index)                      \
      (index)++;                                   \
      if ((index) >= nsegments_used)               \
         (index) = 0;
#  define ADVISE_ADDRESS(segment)                  \
      (segment)->start
#  define ADVISE_ADDRESS_ALIGNED(segment)          \
      VG_ROUNDUP((segment)->start, req->start)
#endif /* VGO_solaris */

   /* Examine holes from index i back round to i-1.  Record the index
      of the first fixed hole and the first floating hole which would
      satisfy the request. */
   for (j = 0; j < nsegments_used; j++) {

      if (nsegments[i].kind != SkFree) {
         UPDATE_INDEX(i);
         continue;
      }

      holeStart = nsegments[i].start;
      holeEnd   = nsegments[i].end;

      /* Stay sane .. */
      aspacem_assert(holeStart <= holeEnd);
      aspacem_assert(aspacem_minAddr <= holeStart);
      aspacem_assert(holeEnd <= aspacem_maxAddr);

      if (req->rkind == MAlign) {
         holeStart = VG_ROUNDUP(holeStart, req->start);
         if (holeStart >= holeEnd) {
            /* This hole can't be used. */
            UPDATE_INDEX(i);
            continue;
         }
      }

      /* See if it's any use to us. */
      holeLen = holeEnd - holeStart + 1;

      if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
         fixedIdx = i;

      if (floatIdx == -1 && holeLen >= reqLen)
         floatIdx = i;

      /* Don't waste time searching once we've found what we wanted. */
      if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
         break;

      UPDATE_INDEX(i);
   }

   aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
   if (fixedIdx >= 0)
      aspacem_assert(nsegments[fixedIdx].kind == SkFree);

   aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
   if (floatIdx >= 0)
      aspacem_assert(nsegments[floatIdx].kind == SkFree);

   /* Now see if we found anything which can satisfy the request. */
   switch (req->rkind) {
      case MFixed:
         if (fixedIdx >= 0) {
            *ok = True;
            return req->start;
         } else {
            *ok = False;
            return 0;
         }
         break;
      case MHint:
         if (fixedIdx >= 0) {
            *ok = True;
            return req->start;
         }
         if (floatIdx >= 0) {
            *ok = True;
            return ADVISE_ADDRESS(&nsegments[floatIdx]);
         }
         *ok = False;
         return 0;
      case MAny:
         if (floatIdx >= 0) {
            *ok = True;
            return ADVISE_ADDRESS(&nsegments[floatIdx]);
         }
         *ok = False;
         return 0;
      case MAlign:
         if (floatIdx >= 0) {
            *ok = True;
            return ADVISE_ADDRESS_ALIGNED(&nsegments[floatIdx]);
         }
         *ok = False;
         return 0;
      default:
         break;
   }

   /*NOTREACHED*/
   ML_(am_barf)("getAdvisory: unknown request kind");
   *ok = False;
   return 0;

#undef UPDATE_INDEX
#undef ADVISE_ADDRESS
#undef ADVISE_ADDRESS_ALIGNED
}
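/* Illustrative sketch (not from the original sources): forming an MAlign
   request.  For MAlign the 'start' field carries the required alignment
   rather than an address -- see the VG_ROUNDUP(holeStart, req->start) step
   above.  The variable names here are hypothetical:

      MapRequest mreq;
      Bool       ok;
      Addr       where;
      mreq.rkind = MAlign;
      mreq.start = 0x10000;            // alignment, not an address
      mreq.len   = 5 * VKI_PAGE_SIZE;
      where = VG_(am_get_advisory)( &mreq, True/*forClient*/, &ok );
      // on success, 'where' is 0x10000-aligned and lies in a free interval
*/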
/* Convenience wrapper for VG_(am_get_advisory) for client floating or
   fixed requests.  If start is zero, a floating request is issued; if
   nonzero, a fixed request at that address is issued.  Same comments
   about return values apply. */

Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
                                          /*OUT*/Bool* ok )
{
   MapRequest mreq;
   mreq.rkind = start==0 ? MAny : MFixed;
   mreq.start = start;
   mreq.len   = len;
   return VG_(am_get_advisory)( &mreq, True/*forClient*/, ok );
}
/* Similar to VG_(am_find_nsegment) but only returns free segments. */
static NSegment const * VG_(am_find_free_nsegment) ( Addr a )
{
   Int i = find_nsegment_idx(a);
   aspacem_assert(i >= 0 && i < nsegments_used);
   aspacem_assert(nsegments[i].start <= a);
   aspacem_assert(a <= nsegments[i].end);
   if (nsegments[i].kind == SkFree)
      return &nsegments[i];
   else
      return NULL;
}
Bool VG_(am_covered_by_single_free_segment)
   ( Addr start, SizeT len)
{
   NSegment const* segLo = VG_(am_find_free_nsegment)( start );
   NSegment const* segHi = VG_(am_find_free_nsegment)( start + len - 1 );

   return segLo != NULL && segHi != NULL && segLo == segHi;
}
/* Notifies aspacem that the client completed an mmap successfully.
   The segment array is updated accordingly.  If the returned Bool is
   True, the caller should immediately discard translations from the
   specified address range. */

Bool
VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
                            Int fd, Off64T offset )
{
   HChar    buf[VKI_PATH_MAX];
   ULong    dev, ino;
   UInt     mode;
   NSegment seg;
   Bool     needDiscard;

   aspacem_assert(len > 0);
   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   aspacem_assert(VG_IS_PAGE_ALIGNED(len));
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));

   /* Discard is needed if any of the just-trashed range had T. */
   needDiscard = any_Ts_in_range( a, len );

   init_nsegment( &seg );
   seg.kind  = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
   seg.start = a;
   seg.end   = a + len - 1;
   seg.hasR  = toBool(prot & VKI_PROT_READ);
   seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   if (!(flags & VKI_MAP_ANONYMOUS)) {
      // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
      seg.offset = offset;
      if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
         seg.dev  = dev;
         seg.ino  = ino;
         seg.mode = mode;
      }
      if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
         seg.fnIdx = ML_(am_allocate_segname)( buf );
      }
   }
   add_segment( &seg );

   return needDiscard;
}
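/* Sketch of the expected call sequence (an assumption: the real mmap syscall
   wrapper lives elsewhere; this only restates the contract documented above):

      SysRes sr = VG_(am_do_mmap_NO_NOTIFY)( a, len, prot, flags, fd, offset );
      if (!sr_isError(sr)) {
         Bool d = VG_(am_notify_client_mmap)( sr_Res(sr), len, prot, flags,
                                              fd, offset );
         if (d) {
            // the caller must now discard translations from [a, a+len)
         }
      }
*/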
/* Notifies aspacem that the client completed a shmat successfully.
   The segment array is updated accordingly.  If the returned Bool is
   True, the caller should immediately discard translations from the
   specified address range. */

Bool
VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
{
   NSegment seg;
   Bool     needDiscard;

   aspacem_assert(len > 0);
   aspacem_assert(VG_IS_PAGE_ALIGNED(a));
   aspacem_assert(VG_IS_PAGE_ALIGNED(len));

   /* Discard is needed if any of the just-trashed range had T. */
   needDiscard = any_Ts_in_range( a, len );

   init_nsegment( &seg );
   seg.kind  = SkShmC;
   seg.start = a;
   seg.end   = a + len - 1;
   seg.hasR  = toBool(prot & VKI_PROT_READ);
   seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   add_segment( &seg );

   return needDiscard;
}
/* Notifies aspacem that an mprotect was completed successfully.  The
   segment array is updated accordingly.  Note, as with
   VG_(am_notify_munmap), it is not the job of this function to reject
   stupid mprotects, for example the client doing mprotect of
   non-client areas.  Such requests should be intercepted earlier, by
   the syscall wrapper for mprotect.  This function merely records
   whatever it is told.  If the returned Bool is True, the caller
   should immediately discard translations from the specified address
   range. */

Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
{
   Int  i, iLo, iHi;
   Bool newR, newW, newX, needDiscard;

   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   aspacem_assert(VG_IS_PAGE_ALIGNED(len));

   if (len == 0)
      return False;

   newR = toBool(prot & VKI_PROT_READ);
   newW = toBool(prot & VKI_PROT_WRITE);
   newX = toBool(prot & VKI_PROT_EXEC);

   /* Discard is needed if we're dumping X permission */
   needDiscard = any_Ts_in_range( start, len ) && !newX;

   split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );

   iLo = find_nsegment_idx(start);
   iHi = find_nsegment_idx(start + len - 1);

   for (i = iLo; i <= iHi; i++) {
      /* Apply the permissions to all relevant segments. */
      switch (nsegments[i].kind) {
         case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
            nsegments[i].hasR = newR;
            nsegments[i].hasW = newW;
            nsegments[i].hasX = newX;
            aspacem_assert(sane_NSegment(&nsegments[i]));
            break;
         default:
            break;
      }
   }

   /* Changing permissions could have made previously un-mergable
      segments mergeable.  Therefore have to re-preen them. */
   (void)preen_nsegments();

   return needDiscard;
}
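/* Worked example of the discard rule above (illustrative only): if a client
   region that already holds translations (hasT) is mprotect'ed from r-x to
   rw-, then newX is False and any_Ts_in_range() is True, so this function
   returns True and the caller must discard translations for that range.  An
   mprotect that keeps PROT_EXEC (e.g. r-x -> rwx) returns False. */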
/* Notifies aspacem that an munmap completed successfully.  The
   segment array is updated accordingly.  As with
   VG_(am_notify_mprotect), we merely record the given info, and don't
   check it for sensibleness.  If the returned Bool is True, the
   caller should immediately discard translations from the specified
   address range. */

Bool VG_(am_notify_munmap)( Addr start, SizeT len )
{
   NSegment seg;
   Bool     needDiscard;

   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   aspacem_assert(VG_IS_PAGE_ALIGNED(len));

   if (len == 0)
      return False;

   needDiscard = any_Ts_in_range( start, len );

   init_nsegment( &seg );
   seg.start = start;
   seg.end   = start + len - 1;

   /* The segment becomes unused (free).  Segments from above
      aspacem_maxAddr were originally SkResvn and so we make them so
      again.  Note, this isn't really right when the segment straddles
      the aspacem_maxAddr boundary - then really it should be split in
      two, the lower part marked as SkFree and the upper part as
      SkResvn.  Ah well. */
   if (start > aspacem_maxAddr
       && /* check previous comparison is meaningful */
          aspacem_maxAddr < Addr_MAX)
      seg.kind = SkResvn;
   else
   /* Ditto for segments from below aspacem_minAddr. */
   if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
      seg.kind = SkResvn;
   else
      seg.kind = SkFree;

   add_segment( &seg );

   /* Unmapping could create two adjacent free segments, so a preen is
      needed.  add_segment() will do that, so no need to here. */
   return needDiscard;
}
/*-----------------------------------------------------------------*/
/*--- Handling mappings which do not arise directly from the     ---*/
/*--- simulation of the client.                                  ---*/
/*-----------------------------------------------------------------*/

/* --- --- --- map, unmap, protect  --- --- --- */
/* Map a file at a fixed address for the client, and update the
   segment array accordingly. */

SysRes VG_(am_mmap_file_fixed_client)
     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
{
   UInt flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE;
   return VG_(am_mmap_named_file_fixed_client_flags)(start, length, prot,
                                                     flags, fd, offset, NULL);
}

SysRes VG_(am_mmap_file_fixed_client_flags)
     ( Addr start, SizeT length, UInt prot, UInt flags, Int fd, Off64T offset )
{
   return VG_(am_mmap_named_file_fixed_client_flags)(start, length, prot,
                                                     flags, fd, offset, NULL);
}

SysRes VG_(am_mmap_named_file_fixed_client)
     ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset,
       const HChar *name )
{
   UInt flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE;
   return VG_(am_mmap_named_file_fixed_client_flags)(start, length, prot,
                                                     flags, fd, offset, name);
}
SysRes VG_(am_mmap_named_file_fixed_client_flags)
     ( Addr start, SizeT length, UInt prot, UInt flags,
       Int fd, Off64T offset, const HChar *name )
{
   SysRes     sres;
   NSegment   seg;
   Addr       advised;
   Bool       ok;
   MapRequest req;
   ULong      dev, ino;
   UInt       mode;
   HChar      buf[VKI_PATH_MAX];

   /* Not allowable. */
   if (length == 0
       || !VG_IS_PAGE_ALIGNED(start)
       || !VG_IS_PAGE_ALIGNED(offset))
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* Ask for an advisory.  If it's negative, fail immediately. */
   req.rkind = MFixed;
   req.start = start;
   req.len   = length;
   advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
   if (!ok || advised != start)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* We have been advised that the mapping is allowable at the
      specified address.  So hand it off to the kernel, and propagate
      any resulting failure immediately. */
   // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   sres = VG_(am_do_mmap_NO_NOTIFY)(
             start, length, prot, flags,
             fd, offset
          );
   if (sr_isError(sres))
      return sres;

   if (sr_Res(sres) != start) {
      /* I don't think this can happen.  It means the kernel made a
         fixed map succeed but not at the requested location.  Try to
         repair the damage, then return saying the mapping failed. */
      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Ok, the mapping succeeded.  Now notify the interval map. */
   init_nsegment( &seg );
   seg.kind   = SkFileC;
   seg.start  = start;
   seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
   seg.offset = offset;
   seg.hasR   = toBool(prot & VKI_PROT_READ);
   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
      seg.dev  = dev;
      seg.ino  = ino;
      seg.mode = mode;
   }
   if (name) {
      seg.fnIdx = ML_(am_allocate_segname)( name );
   } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
      seg.fnIdx = ML_(am_allocate_segname)( buf );
   }
   add_segment( &seg );

   return sres;
}
/* Map anonymously at a fixed address for the client, and update
   the segment array accordingly. */

SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
{
   SysRes     sres;
   NSegment   seg;
   Addr       advised;
   Bool       ok;
   MapRequest req;

   /* Not allowable. */
   if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* Ask for an advisory.  If it's negative, fail immediately. */
   req.rkind = MFixed;
   req.start = start;
   req.len   = length;
   advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
   if (!ok || advised != start)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* We have been advised that the mapping is allowable at the
      specified address.  So hand it off to the kernel, and propagate
      any resulting failure immediately. */
   // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   sres = VG_(am_do_mmap_NO_NOTIFY)(
             start, length, prot,
             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
             0, 0
          );
   if (sr_isError(sres))
      return sres;

   if (sr_Res(sres) != start) {
      /* I don't think this can happen.  It means the kernel made a
         fixed map succeed but not at the requested location.  Try to
         repair the damage, then return saying the mapping failed. */
      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Ok, the mapping succeeded.  Now notify the interval map. */
   init_nsegment( &seg );
   seg.kind  = SkAnonC;
   seg.start = start;
   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   seg.hasR  = toBool(prot & VKI_PROT_READ);
   seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   add_segment( &seg );

   return sres;
}
/* Map anonymously at an unconstrained address for the client, and
   update the segment array accordingly. */

static SysRes am_mmap_anon_float_client ( SizeT length, Int prot, Bool isCH )
{
   SysRes     sres;
   NSegment   seg;
   Addr       advised;
   Bool       ok;
   MapRequest req;

   /* Not allowable. */
   if (length == 0)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* Ask for an advisory.  If it's negative, fail immediately. */
   req.rkind = MAny;
   req.start = 0;
   req.len   = length;
   advised = VG_(am_get_advisory)( &req, True/*forClient*/, &ok );
   if (!ok)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* We have been advised that the mapping is allowable at the
      advised address.  So hand it off to the kernel, and propagate
      any resulting failure immediately. */
   // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
   sres = VG_(am_do_mmap_NO_NOTIFY)(
             advised, length, prot,
             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
             0, 0
          );
   if (sr_isError(sres))
      return sres;

   if (sr_Res(sres) != advised) {
      /* I don't think this can happen.  It means the kernel made a
         fixed map succeed but not at the requested location.  Try to
         repair the damage, then return saying the mapping failed. */
      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Ok, the mapping succeeded.  Now notify the interval map. */
   init_nsegment( &seg );
   seg.kind  = SkAnonC;
   seg.start = advised;
   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   seg.hasR  = toBool(prot & VKI_PROT_READ);
   seg.hasW  = toBool(prot & VKI_PROT_WRITE);
   seg.hasX  = toBool(prot & VKI_PROT_EXEC);
   seg.isCH  = isCH;
   add_segment( &seg );

   return sres;
}
SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
{
   return am_mmap_anon_float_client (length, prot, False /* isCH */);
}
/* Map anonymously at an unconstrained address for V, and update the
   segment array accordingly.  This is fundamentally how V allocates
   itself more address space when needed. */

SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
{
   SysRes     sres;
   NSegment   seg;
   Addr       advised;
   Bool       ok;
   MapRequest req;

   /* Not allowable. */
   if (length == 0)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* Ask for an advisory.  If it's negative, fail immediately. */
   req.rkind = MAny;
   req.start = 0;
   req.len   = length;
   advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
   if (!ok)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

// On Darwin, for anonymous maps you can pass in a tag which is used by
// programs like vmmap for statistical purposes.
#ifndef VM_TAG_VALGRIND
#  define VM_TAG_VALGRIND 0
#endif

   /* We have been advised that the mapping is allowable at the
      specified address.  So hand it off to the kernel, and propagate
      any resulting failure immediately. */
   /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
      another thread can pre-empt our spot.  [At one point on the DARWIN
      branch the VKI_MAP_FIXED was commented out; unclear if this is
      necessary or not given the second Darwin-only call that immediately
      follows if this one fails.  --njn]
      Also, an inner valgrind cannot observe the mmap syscalls done by
      the outer valgrind.  The outer Valgrind might make the mmap
      fail here, as the inner valgrind believes that a segment is free,
      while it is in fact used by the outer valgrind.
      So, for an inner valgrind, similarly to DARWIN, if the fixed mmap
      fails, retry the mmap without map fixed.
      This is a kludge which on linux is only activated for the inner.
      The state of the inner aspacemgr is not made correct by this kludge
      and so a.o. VG_(am_do_sync_check) could fail.
      A proper solution implies a better collaboration between the
      inner and the outer (e.g. inner VG_(am_get_advisory) should do
      a client request to call the outer VG_(am_get_advisory). */
   sres = VG_(am_do_mmap_NO_NOTIFY)(
             advised, length,
             VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
             VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
             VM_TAG_VALGRIND, 0
          );
#if defined(VGO_darwin) || defined(ENABLE_INNER)
   /* Kludge on Darwin and inner linux if the fixed mmap failed. */
   if (sr_isError(sres)) {
      /* try again, ignoring the advisory */
      sres = VG_(am_do_mmap_NO_NOTIFY)(
                0, length,
                VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
                /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
                VM_TAG_VALGRIND, 0
             );
   }
#endif
   if (sr_isError(sres))
      return sres;

#if defined(VGO_linux) && !defined(ENABLE_INNER)
   /* Doing the check only in linux not inner, as the below
      check can fail when the kludge above has been used. */
   if (sr_Res(sres) != advised) {
      /* I don't think this can happen.  It means the kernel made a
         fixed map succeed but not at the requested location.  Try to
         repair the damage, then return saying the mapping failed. */
      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
#endif

   /* Ok, the mapping succeeded.  Now notify the interval map. */
   init_nsegment( &seg );
   seg.kind  = SkAnonV;
   seg.start = sr_Res(sres);
   seg.end   = seg.start + VG_PGROUNDUP(length) - 1;
   seg.hasR  = seg.hasW = seg.hasX = True;
   add_segment( &seg );

   return sres;
}
/* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */

void* VG_(am_shadow_alloc)(SizeT size)
{
   SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
   return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
}
/* Map a file at an unconstrained address for V, and update the
   segment array accordingly.  Use the provided flags. */

static SysRes VG_(am_mmap_file_float_valgrind_flags) ( SizeT length, UInt prot,
                                                       UInt flags,
                                                       Int fd, Off64T offset )
{
   SysRes     sres;
   NSegment   seg;
   Addr       advised;
   Bool       ok;
   MapRequest req;
   ULong      dev, ino;
   UInt       mode;
   HChar      buf[VKI_PATH_MAX];

   /* Not allowable. */
   if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
      return VG_(mk_SysRes_Error)( VKI_EINVAL );

   /* Ask for an advisory.  If it's negative, fail immediately. */
   req.rkind = MAny;
   req.start = 0;
#if defined(VGA_arm) || defined(VGA_arm64) \
    || defined(VGA_mips32) || defined(VGA_mips64)
   aspacem_assert(VKI_SHMLBA >= VKI_PAGE_SIZE);
#else
   aspacem_assert(VKI_SHMLBA == VKI_PAGE_SIZE);
#endif
   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & flags)) {
      /* arm-linux only. See ML_(generic_PRE_sys_shmat) and bug 290974 */
      req.len = length + VKI_SHMLBA - VKI_PAGE_SIZE;
   } else {
      req.len = length;
   }
   advised = VG_(am_get_advisory)( &req, False/*forClient*/, &ok );
   if (!ok)
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   if ((VKI_SHMLBA > VKI_PAGE_SIZE) && (VKI_MAP_SHARED & flags))
      advised = VG_ROUNDUP(advised, VKI_SHMLBA);

   /* We have been advised that the mapping is allowable at the
      specified address.  So hand it off to the kernel, and propagate
      any resulting failure immediately. */
   sres = VG_(am_do_mmap_NO_NOTIFY)(
             advised, length, prot,
             flags,
             fd, offset
          );
   if (sr_isError(sres))
      return sres;

   if (sr_Res(sres) != advised) {
      /* I don't think this can happen.  It means the kernel made a
         fixed map succeed but not at the requested location.  Try to
         repair the damage, then return saying the mapping failed. */
      (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
      return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }

   /* Ok, the mapping succeeded.  Now notify the interval map. */
   init_nsegment( &seg );
   seg.kind   = SkFileV;
   seg.start  = sr_Res(sres);
   seg.end    = seg.start + VG_PGROUNDUP(length) - 1;
   seg.offset = offset;
   seg.hasR   = toBool(prot & VKI_PROT_READ);
   seg.hasW   = toBool(prot & VKI_PROT_WRITE);
   seg.hasX   = toBool(prot & VKI_PROT_EXEC);
   if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
      seg.dev  = dev;
      seg.ino  = ino;
      seg.mode = mode;
   }
   if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
      seg.fnIdx = ML_(am_allocate_segname)( buf );
   }
   add_segment( &seg );

   return sres;
}
/* Map privately a file at an unconstrained address for V, and update the
   segment array accordingly.  This is used by V for transiently
   mapping in object files to read their debug info. */

SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
                                          Int fd, Off64T offset )
{
   return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
                                                  VKI_MAP_FIXED|VKI_MAP_PRIVATE,
                                                  fd, offset );
}

SysRes VG_(am_shared_mmap_file_float_valgrind)
   ( SizeT length, UInt prot, Int fd, Off64T offset )
{
   return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
                                                  VKI_MAP_FIXED|VKI_MAP_SHARED,
                                                  fd, offset );
}
/* Similar to VG_(am_mmap_anon_float_client) but also
   marks the segment as containing the client heap.  This is for the benefit
   of the leak checker which needs to be able to identify such segments
   so as not to use them as sources of roots during leak checks. */
SysRes VG_(am_mmap_client_heap) ( SizeT length, Int prot )
{
   return am_mmap_anon_float_client (length, prot, True /* isCH */);
}
/* --- --- munmap helper --- --- */

static
SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
                            Addr start, SizeT len, Bool forClient )
{
   Bool   d;
   SysRes sres;

   /* Be safe with this regardless of return path. */
   *need_discard = False;

   if (!VG_IS_PAGE_ALIGNED(start))
      goto eINVAL;

   if (len == 0) {
      *need_discard = False;
      return VG_(mk_SysRes_Success)( 0 );
   }

   if (start + len < len)
      goto eINVAL;

   len = VG_PGROUNDUP(len);
   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   aspacem_assert(VG_IS_PAGE_ALIGNED(len));

   if (forClient) {
      if (!VG_(am_is_valid_for_client_or_free_or_resvn)
            ( start, len, VKI_PROT_NONE ))
         goto eINVAL;
   } else {
      if (!VG_(am_is_valid_for_valgrind)
            ( start, len, VKI_PROT_NONE ))
         goto eINVAL;
   }

   d = any_Ts_in_range( start, len );

   sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
   if (sr_isError(sres))
      return sres;

   VG_(am_notify_munmap)( start, len );
   *need_discard = d;
   return sres;

  eINVAL:
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}
/* Unmap the given address range and update the segment array
   accordingly.  This fails if the range isn't valid for the client.
   If *need_discard is True after a successful return, the caller
   should immediately discard translations from the specified address
   range. */

SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
                              Addr start, SizeT len )
{
   return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
}

/* Unmap the given address range and update the segment array
   accordingly.  This fails if the range isn't valid for valgrind. */

SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
{
   Bool   need_discard;
   SysRes r = am_munmap_both_wrk( &need_discard,
                                  start, len, False/*valgrind*/ );
   /* If this assertion fails, it means we allowed translations to be
      made from a V-owned section.  Which shouldn't happen. */
   if (!sr_isError(r))
      aspacem_assert(!need_discard);
   return r;
}
/* Let (start,len) denote an area within a single Valgrind-owned
   segment (anon or file).  Change the ownership of [start, start+len)
   to the client instead.  Fails if (start,len) does not denote a
   suitable segment. */

Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
{
   Int i, iLo, iHi;

   if (len == 0)
      return True;
   if (start + len < start)
      return False;
   if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
      return False;

   i = find_nsegment_idx(start);
   if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
      return False;
   if (start+len-1 > nsegments[i].end)
      return False;

   aspacem_assert(start >= nsegments[i].start);
   aspacem_assert(start+len-1 <= nsegments[i].end);

   /* This scheme is like how mprotect works: split the to-be-changed
      range into its own segment(s), then mess with them (it).  There
      should be only one. */
   split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
   aspacem_assert(iLo == iHi);
   switch (nsegments[iLo].kind) {
      case SkFileV: nsegments[iLo].kind = SkFileC; break;
      case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
      default: aspacem_assert(0); /* can't happen - guarded above */
   }

   return True;
}
/* Set the 'hasT' bit on the segment containing ADDR indicating that
   translations have or may have been taken from this segment.  ADDR is
   expected to belong to a client segment. */
void VG_(am_set_segment_hasT)( Addr addr )
{
   Int i = find_nsegment_idx(addr);
   SegKind kind = nsegments[i].kind;
   aspacem_assert(kind == SkAnonC || kind == SkFileC || kind == SkShmC);
   nsegments[i].hasT = True;
}
/* --- --- --- reservations --- --- --- */

/* Create a reservation from START .. START+LENGTH-1, with the given
   ShrinkMode.  When checking whether the reservation can be created,
   also ensure that at least abs(EXTRA) extra free bytes will remain
   above (> 0) or below (< 0) the reservation.

   The reservation will only be created if it, plus the extra-zone,
   falls entirely within a single free segment.  The returned Bool
   indicates whether the creation succeeded. */

Bool VG_(am_create_reservation) ( Addr start, SizeT length,
                                  ShrinkMode smode, SSizeT extra )
{
   Int      startI, endI;
   NSegment seg;

   /* start and end, not taking into account the extra space. */
   Addr start1 = start;
   Addr end1   = start + length - 1;

   /* start and end, taking into account the extra space. */
   Addr start2 = start1;
   Addr end2   = end1;

   if (extra < 0) start2 += extra; // this moves it down :-)
   if (extra > 0) end2 += extra;

   aspacem_assert(VG_IS_PAGE_ALIGNED(start));
   aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
   aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
   aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));

   startI = find_nsegment_idx( start2 );
   endI   = find_nsegment_idx( end2 );

   /* If the start and end points don't fall within the same (free)
      segment, we're hosed.  This does rely on the assumption that all
      mergeable adjacent segments can be merged, but add_segment()
      should ensure that. */
   if (startI != endI)
      return False;

   if (nsegments[startI].kind != SkFree)
      return False;

   /* Looks good - make the reservation. */
   aspacem_assert(nsegments[startI].start <= start2);
   aspacem_assert(end2 <= nsegments[startI].end);

   init_nsegment( &seg );
   seg.kind  = SkResvn;
   seg.start = start1;  /* NB: extra space is not included in the
                           reservation. */
   seg.end   = end1;
   seg.smode = smode;
   add_segment( &seg );

   return True;
}
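/* Usage sketch (hedged: it only restates the contracts documented above and
   below, with made-up names and sizes): reserve an address window, then later
   grow an adjacent anonymous client mapping forwards into it.

      Bool ok = VG_(am_create_reservation)( resvn_start, resvn_len,
                                            SmLower, 0 );
      // ... an SkAnonC segment is created ending at resvn_start-1 ...
      Bool ovf;
      const NSegment* sg
         = VG_(am_extend_into_adjacent_reservation_client)( anon_start,
                                                            delta, &ovf );
      // sg is the resized segment, or NULL if the reservation is too small
*/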
/* ADDR is the start address of an anonymous client mapping.  This fn extends
   the mapping by DELTA bytes, taking the space from a reservation section
   which must be adjacent.  If DELTA is positive, the segment is
   extended forwards in the address space, and the reservation must be
   the next one along.  If DELTA is negative, the segment is extended
   backwards in the address space and the reservation must be the
   previous one.  DELTA must be page aligned.  abs(DELTA) must not
   exceed the size of the reservation segment minus one page, that is,
   the reservation segment after the operation must be at least one
   page long.  The function returns a pointer to the resized segment. */

const NSegment *VG_(am_extend_into_adjacent_reservation_client)( Addr addr,
                                                                 SSizeT delta,
                                                                 Bool *overflow )
{
   Int    segA, segR;
   UInt   prot;
   SysRes sres;

   *overflow = False;

   segA = find_nsegment_idx(addr);
   aspacem_assert(nsegments[segA].kind == SkAnonC);

   if (delta == 0)
      return nsegments + segA;

   prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
          | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
          | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);

   aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));

   if (delta > 0) {

      /* Extending the segment forwards. */
      segR = segA+1;
      if (segR >= nsegments_used
          || nsegments[segR].kind != SkResvn
          || nsegments[segR].smode != SmLower)
         return NULL;

      if (delta + VKI_PAGE_SIZE
                > (nsegments[segR].end - nsegments[segR].start + 1)) {
         *overflow = True;
         return NULL;
      }

      /* Extend the kernel's mapping. */
      // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
      sres = VG_(am_do_mmap_NO_NOTIFY)(
                nsegments[segR].start, delta,
                prot,
                VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
                0, 0
             );
      if (sr_isError(sres))
         return NULL; /* kernel bug if this happens? */
      if (sr_Res(sres) != nsegments[segR].start) {
         /* kernel bug if this happens? */
         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
         return NULL;
      }

      /* Ok, success with the kernel.  Update our structures. */
      nsegments[segR].start += delta;
      nsegments[segA].end   += delta;
      aspacem_assert(nsegments[segR].start <= nsegments[segR].end);

   } else {

      /* Extending the segment backwards. */
      delta = -delta;
      aspacem_assert(delta > 0);

      segR = segA-1;
      if (segR < 0
          || nsegments[segR].kind != SkResvn
          || nsegments[segR].smode != SmUpper)
         return NULL;

      if (delta + VKI_PAGE_SIZE
                > (nsegments[segR].end - nsegments[segR].start + 1)) {
         *overflow = True;
         return NULL;
      }

      /* Extend the kernel's mapping. */
      // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
      sres = VG_(am_do_mmap_NO_NOTIFY)(
                nsegments[segA].start-delta, delta,
                prot,
                VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
                0, 0
             );
      if (sr_isError(sres))
         return NULL; /* kernel bug if this happens? */
      if (sr_Res(sres) != nsegments[segA].start-delta) {
         /* kernel bug if this happens? */
         (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
         return NULL;
      }

      /* Ok, success with the kernel.  Update our structures. */
      nsegments[segR].end   -= delta;
      nsegments[segA].start -= delta;
      aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
   }

   return nsegments + segA;
}
/* --- --- --- resizing/move a mapping --- --- --- */

#if HAVE_MREMAP

/* This function grows a client mapping in place into an adjacent free segment.
   ADDR is the client mapping's start address and DELTA, which must be page
   aligned, is the growth amount.  The function returns a pointer to the
   resized segment.  The function is used in support of mremap. */
const NSegment *VG_(am_extend_map_client)( Addr addr, SizeT delta )
{
   Addr   xStart;
   SysRes sres;

   if (0)
      VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");

   /* Get the client segment */
   Int ix = find_nsegment_idx(addr);
   aspacem_assert(ix >= 0 && ix < nsegments_used);

   NSegment *seg = nsegments + ix;

   aspacem_assert(seg->kind == SkFileC || seg->kind == SkAnonC ||
                  seg->kind == SkShmC);
   aspacem_assert(delta > 0 && VG_IS_PAGE_ALIGNED(delta)) ;

   xStart = seg->end+1;
   aspacem_assert(xStart + delta >= delta);   // no wrap-around

   /* The segment following the client segment must be a free segment and
      it must be large enough to cover the additional memory. */
   NSegment *segf = seg + 1;
   aspacem_assert(segf->kind == SkFree);
   aspacem_assert(segf->start == xStart);
   aspacem_assert(xStart + delta - 1 <= segf->end);

   SizeT seg_old_len = seg->end + 1 - seg->start;

   sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
                                               seg_old_len,
                                               seg_old_len + delta );
   if (sr_isError(sres)) {
      return NULL;
   } else {
      /* the area must not have moved */
      aspacem_assert(sr_Res(sres) == seg->start);
   }

   NSegment seg_copy = *seg;
   seg_copy.end += delta;
   add_segment( &seg_copy );

   if (0)
      VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");

   return nsegments + find_nsegment_idx(addr);
}
/* Remap the old address range to the new address range.  Fails if any
   parameter is not page aligned, if either size is zero, if any
   wraparound is implied, if the old address range does not fall
   entirely within a single segment, if the new address range overlaps
   with the old one, or if the old address range is not a valid client
   mapping.  If *need_discard is True after a successful return, the
   caller should immediately discard translations from both specified
   address ranges. */

Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
                                        Addr old_addr, SizeT old_len,
                                        Addr new_addr, SizeT new_len )
{
   Int      iLo, iHi;
   SysRes   sres;
   NSegment seg;

   if (old_len == 0 || new_len == 0)
      return False;

   if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
       || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
      return False;

   if (old_addr + old_len < old_addr
       || new_addr + new_len < new_addr)
      return False;

   if (old_addr + old_len - 1 < new_addr
       || new_addr + new_len - 1 < old_addr) {
      /* no overlap */
   } else
      return False;

   iLo = find_nsegment_idx( old_addr );
   iHi = find_nsegment_idx( old_addr + old_len - 1 );
   if (iLo != iHi)
      return False;

   if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC &&
       nsegments[iLo].kind != SkShmC)
      return False;

   sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
             ( old_addr, old_len, new_addr, new_len );
   if (sr_isError(sres)) {
      return False;
   } else {
      aspacem_assert(sr_Res(sres) == new_addr);
   }

   *need_discard = any_Ts_in_range( old_addr, old_len )
                   || any_Ts_in_range( new_addr, new_len );

   seg = nsegments[iLo];

   /* Mark the new area based on the old seg. */
   if (seg.kind == SkFileC) {
      seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
   }
   seg.start = new_addr;
   seg.end   = new_addr + new_len - 1;
   add_segment( &seg );

   /* Create a free hole in the old location. */
   init_nsegment( &seg );
   seg.start = old_addr;
   seg.end   = old_addr + old_len - 1;
   /* See comments in VG_(am_notify_munmap) about this SkResvn vs
      SkFree thing. */
   if (old_addr > aspacem_maxAddr
       && /* check previous comparison is meaningful */
          aspacem_maxAddr < Addr_MAX)
      seg.kind = SkResvn;
   else
      seg.kind = SkFree;

   add_segment( &seg );

   return True;
}

#endif // HAVE_MREMAP
#if defined(VGO_linux)

/*-----------------------------------------------------------------*/
/*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
/*--- Almost completely independent of the stuff above.  The    ---*/
/*--- only function it 'exports' to the code above this comment ---*/
/*--- is parse_procselfmaps.                                    ---*/
/*-----------------------------------------------------------------*/

/*------BEGIN-procmaps-parser-for-Linux--------------------------*/
/* Size of a smallish table used to read /proc/self/map entries. */
#define M_PROCMAP_BUF 100000

/* static ... to keep it out of the stack frame. */
static HChar procmap_buf[M_PROCMAP_BUF];

/* Records length of /proc/self/maps read into procmap_buf. */
static Int buf_n_tot;

static Int hexdigit ( HChar c )
{
   if (c >= '0' && c <= '9') return (Int)(c - '0');
   if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
   if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
   return -1;
}

static Int decdigit ( HChar c )
{
   if (c >= '0' && c <= '9') return (Int)(c - '0');
   return -1;
}

static Int readchar ( const HChar* buf, HChar* ch )
{
   if (*buf == 0) return 0;
   *ch = *buf;
   return 1;
}

static Int readhex ( const HChar* buf, UWord* val )
{
   /* Read a word-sized hex number. */
   Int n = 0;
   *val = 0;
   while (hexdigit(*buf) >= 0) {
      *val = (*val << 4) + hexdigit(*buf);
      n++; buf++;
   }
   return n;
}

static Int readhex64 ( const HChar* buf, ULong* val )
{
   /* Read a potentially 64-bit hex number. */
   Int n = 0;
   *val = 0;
   while (hexdigit(*buf) >= 0) {
      *val = (*val << 4) + hexdigit(*buf);
      n++; buf++;
   }
   return n;
}

static Int readdec64 ( const HChar* buf, ULong* val )
{
   Int n = 0;
   *val = 0;
   while (decdigit(*buf) >= 0) {
      *val = (*val * 10) + decdigit(*buf);
      n++; buf++;
   }
   return n;
}

/* Get the contents of /proc/self/maps into a static buffer.  If
   there's a syntax error, it won't fit, or other failure, just
   abort. */

static void read_procselfmaps_into_buf ( void )
{
   Int    n_chunk;
   SysRes fd;

   /* Read the initial memory mapping from the /proc filesystem. */
   fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
   if (sr_isError(fd))
      ML_(am_barf)("can't open /proc/self/maps");

   buf_n_tot = 0;
   do {
      n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
                              M_PROCMAP_BUF - buf_n_tot );
      if (n_chunk >= 0)
         buf_n_tot += n_chunk;
   } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );

   ML_(am_close)(sr_Res(fd));

   if (buf_n_tot >= M_PROCMAP_BUF-5)
      ML_(am_barf_toolow)("M_PROCMAP_BUF");
   if (buf_n_tot == 0)
      ML_(am_barf)("I/O error on /proc/self/maps");

   procmap_buf[buf_n_tot] = 0;
}
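/* For reference, a typical /proc/self/maps line that the parser below has to
   handle (example values only):

      08048000-0804c000 r-xp 00000000 08:01 123456    /bin/true

   readhex() consumes the two addresses and the major:minor device pair,
   readchar() the '-', the four permission characters and the separators,
   readhex64() the file offset, and readdec64() the inode number. */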
/* Parse /proc/self/maps.  For each map entry, call
   record_mapping, passing it, in this order:

     start address in memory
     size
     page protections (using the VKI_PROT_* flags)
     mapped file device and inode
     offset in file, or zero if no file
     filename, zero terminated, or NULL if no file

   So the sig of the called fn might be

      void (*record_mapping)( Addr start, SizeT size, UInt prot,
                              UInt dev, UInt info,
                              ULong foffset, UChar* filename )

   Note that the supplied filename is transiently stored; record_mapping
   should make a copy if it wants to keep it.

   Nb: it is important that this function does not alter the contents of
       procmap_buf!
*/
static void parse_procselfmaps (
      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
                              ULong dev, ULong ino, Off64T offset,
                              const HChar* filename ),
      void (*record_gap)( Addr addr, SizeT len )
   )
{
   Int    i, j, i_eol;
   Addr   start, endPlusOne, gapStart;
   HChar* filename;
   HChar  rr, ww, xx, pp, ch, tmp;
   UInt   prot;
   UWord  maj, min;
   ULong  foffset, dev, ino;

   foffset = ino = 0; /* keep gcc-4.1.0 happy */

   read_procselfmaps_into_buf();

   aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);

   if (0)
      VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);

   /* Ok, it's safely aboard.  Parse the entries. */
   i = 0;
   gapStart = Addr_MIN;
   while (True) {
      if (i >= buf_n_tot) break;

      /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
      j = readhex(&procmap_buf[i], &start);
      if (j > 0) i += j; else goto syntaxerror;
      j = readchar(&procmap_buf[i], &ch);
      if (j == 1 && ch == '-') i += j; else goto syntaxerror;
      j = readhex(&procmap_buf[i], &endPlusOne);
      if (j > 0) i += j; else goto syntaxerror;

      j = readchar(&procmap_buf[i], &ch);
      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;

      j = readchar(&procmap_buf[i], &rr);
      if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
      j = readchar(&procmap_buf[i], &ww);
      if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
      j = readchar(&procmap_buf[i], &xx);
      if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
      /* This field is the shared/private flag */
      j = readchar(&procmap_buf[i], &pp);
      if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
         i += j; else goto syntaxerror;

      j = readchar(&procmap_buf[i], &ch);
      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;

      j = readhex64(&procmap_buf[i], &foffset);
      if (j > 0) i += j; else goto syntaxerror;

      j = readchar(&procmap_buf[i], &ch);
      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;

      j = readhex(&procmap_buf[i], &maj);
      if (j > 0) i += j; else goto syntaxerror;
      j = readchar(&procmap_buf[i], &ch);
      if (j == 1 && ch == ':') i += j; else goto syntaxerror;
      j = readhex(&procmap_buf[i], &min);
      if (j > 0) i += j; else goto syntaxerror;

      j = readchar(&procmap_buf[i], &ch);
      if (j == 1 && ch == ' ') i += j; else goto syntaxerror;

      j = readdec64(&procmap_buf[i], &ino);
      if (j > 0) i += j; else goto syntaxerror;

      goto read_line_ok;

    syntaxerror:
      VG_(debugLog)(0, "Valgrind:",
                       "FATAL: syntax error reading /proc/self/maps\n");
      { Int k, m;
        HChar buf50[51];
        m = 0;
        buf50[m] = 0;
        k = i - 50;
        if (k < 0) k = 0;
        for (; k <= i; k++) {
           buf50[m] = procmap_buf[k];
           m++;
           if (m >= 50)
              break;
        }
        buf50[m] = 0;
        VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
      }
      ML_(am_exit)(1);

    read_line_ok:

      aspacem_assert(i < buf_n_tot);

      /* Try and find the name of the file mapped to this segment, if
         it exists.  Note that file names can contain spaces. */

      // Move i to the next non-space char, which should be either a '/',
      // a '[', or a newline.
      while (procmap_buf[i] == ' ') i++;

      // Move i_eol to the end of the line.
      i_eol = i;
      while (procmap_buf[i_eol] != '\n') i_eol++;

      // If there's a filename...
      if (procmap_buf[i] == '/') {
         /* Minor hack: put a '\0' at the filename end for the call to
            'record_mapping', then restore the old char with 'tmp'. */
         filename = &procmap_buf[i];
         tmp = filename[i_eol - i];
         filename[i_eol - i] = '\0';
      } else {
         tmp = 0;
         filename = NULL;
         foffset = 0;
      }

      prot = 0;
      if (rr == 'r') prot |= VKI_PROT_READ;
      if (ww == 'w') prot |= VKI_PROT_WRITE;
      if (xx == 'x') prot |= VKI_PROT_EXEC;

      /* Linux has two ways to encode a device number when it
         is exposed to user space (via fstat etc). The old way
         is the traditional unix scheme that produces a 16 bit
         device number with the top 8 being the major number and
         the bottom 8 the minor number.

         The new scheme allows for a 12 bit major number and
         a 20 bit minor number by using a 32 bit device number
         and putting the top 12 bits of the minor number into
         the top 12 bits of the device number thus leaving an
         extra 4 bits for the major number.

         If the minor and major number are both single byte
         values then both schemes give the same result so we
         use the new scheme here in case either number is
         outside the 0-255 range and then use fstat64 when
         available (or fstat on 64 bit systems) so that we
         should always have a new style device number and
         everything should match. */
      dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);

      if (record_gap && gapStart < start)
         (*record_gap) ( gapStart, start-gapStart );

      if (record_mapping && start < endPlusOne)
         (*record_mapping) ( start, endPlusOne-start,
                             prot, dev, ino,
                             foffset, filename );

      if ('\0' != tmp) {
         filename[i_eol - i] = tmp;
      }

      i = i_eol + 1;
      gapStart = endPlusOne;
   }

#  if defined(VGP_arm_linux)
   /* ARM puts code at the end of memory that contains processor
      specific stuff (cmpxchg, getting the thread local storage, etc.)
      This isn't specified in /proc/self/maps, so do it here.  This
      kludgery causes the view of memory, as presented to
      record_gap/record_mapping, to actually reflect reality.  IMO
      (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
      the commpage should be regarded as a bug in the kernel. */
   { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
     const Addr commpage_end1  = ARM_LINUX_FAKE_COMMPAGE_END1;
     if (gapStart < commpage_start) {
        if (record_gap)
           (*record_gap)( gapStart, commpage_start - gapStart );
        if (record_mapping)
           (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
                              VKI_PROT_READ|VKI_PROT_EXEC,
                              0/*dev*/, 0/*ino*/, 0/*foffset*/,
                              NULL);
        gapStart = commpage_end1;
     }
   }
#  endif

   if (record_gap && gapStart < Addr_MAX)
      (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
}
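/* Worked example of the device-number encoding used above (illustrative
   numbers): with maj = 0x12 and min = 0x34567,

      dev = (0x34567 & 0xff) | (0x12 << 8) | ((0x34567 & ~0xff) << 12)
          = 0x67 | 0x1200 | 0x34500000
          = 0x34501267

   i.e. the low byte of the minor number, the major number in the next 12
   bits, and the remaining minor bits shifted into the top of the value. */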
/*------END-procmaps-parser-for-Linux----------------------------*/

/*------BEGIN-procmaps-parser-for-Darwin-------------------------*/

#elif defined(VGO_darwin)
#include <mach/mach.h>
#include <mach/mach_vm.h>

static unsigned int mach2vki(unsigned int vm_prot)
{
   return
      ((vm_prot & VM_PROT_READ)    ? VKI_PROT_READ    : 0) |
      ((vm_prot & VM_PROT_WRITE)   ? VKI_PROT_WRITE   : 0) |
      ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC    : 0) ;
}

static UInt stats_machcalls = 0;
static void parse_procselfmaps (
      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
                              ULong dev, ULong ino, Off64T offset,
                              const HChar* filename ),
      void (*record_gap)( Addr addr, SizeT len )
   )
{
   vm_address_t iter;
   unsigned int depth;
   vm_address_t last;

   iter = 0;
   depth = 0;
   last = 0;
   while (1) {
      mach_vm_address_t addr = iter;
      mach_vm_size_t size;
      vm_region_submap_short_info_data_64_t info;
      kern_return_t kr;

      while (1) {
         mach_msg_type_number_t info_count
            = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
         stats_machcalls++;
         kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
                                     (vm_region_info_t)&info, &info_count);
         if (kr)
            return;
         if (info.is_submap) {
            depth++;
            continue;
         }
         break;
      }
      iter = addr + size;

      if (addr > last  &&  record_gap) {
         (*record_gap)(last, addr - last);
      }
      if (record_mapping) {
         (*record_mapping)(addr, size, mach2vki(info.protection),
                           0, 0, info.offset, NULL);
      }
      last = addr + size;
   }

   if ((Addr)-1 > last  &&  record_gap)
      (*record_gap)(last, (Addr)-1 - last);
}
// Urr.  So much for thread safety.
static Bool        css_overflowed;
static ChangedSeg* css_local;
static Int         css_size_local;
static Int         css_used_local;

static Addr Addr__max ( Addr a, Addr b ) { return a > b ? a : b; }
static Addr Addr__min ( Addr a, Addr b ) { return a < b ? a : b; }
static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
                                 ULong dev, ULong ino, Off64T offset,
                                 const HChar *filename)
{
   // derived from sync_check_mapping_callback()

   /* JRS 2012-Mar-07: this all seems very dubious to me.  It would be
      safer to see if we can find, in V's segment collection, one
      single segment that completely covers the range [addr, +len)
      (and possibly more), and that has the exact same other
      properties (prot, dev, ino, offset, etc) as the data presented
      here.  If found, we just skip.  Otherwise add the data presented
      here into css_local[]. */

   Int iLo, iHi, i;

   if (len == 0) return;

   /* The kernel should not give us wraparounds. */
   aspacem_assert(addr <= addr + len - 1);

   iLo = find_nsegment_idx( addr );
   iHi = find_nsegment_idx( addr + len - 1 );

   /* NSegments iLo .. iHi inclusive should agree with the presented
      data. */
   for (i = iLo; i <= iHi; i++) {

      if (nsegments[i].kind == SkAnonV || nsegments[i].kind == SkFileV) {
         /* Ignore V regions */
         continue;
      }
      else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
         /* Add mapping for SkResvn regions */
         ChangedSeg* cs = &css_local[css_used_local];
         if (css_used_local < css_size_local) {
            cs->is_added = True;
            cs->start    = addr;
            cs->end      = addr + len - 1;
            cs->prot     = prot;
            cs->offset   = offset;
            css_used_local++;
         } else {
            css_overflowed = True;
         }
         return;
      }
      else if (nsegments[i].kind == SkAnonC ||
               nsegments[i].kind == SkFileC ||
               nsegments[i].kind == SkShmC)
      {
         /* Check permissions on client regions */
         UInt seg_prot = 0;
         if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
         if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
#        if defined(VGA_x86)
         // GrP fixme sloppyXcheck
         // darwin: kernel X ignored and spuriously changes? (vm_copy)
         seg_prot |= (prot & VKI_PROT_EXEC);
#        else
         if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
#        endif
         if (seg_prot != prot) {
            if (VG_(clo_trace_syscalls))
               VG_(debugLog)(0,"aspacem","region %p..%p permission "
                               "mismatch (kernel %x, V %x)\n",
                               (void*)nsegments[i].start,
                               (void*)(nsegments[i].end+1), prot, seg_prot);
            /* Add mapping for regions with protection changes */
            ChangedSeg* cs = &css_local[css_used_local];
            if (css_used_local < css_size_local) {
               cs->is_added = True;
               cs->start    = addr;
               cs->end      = addr + len - 1;
               cs->prot     = prot;
               cs->offset   = offset;
               css_used_local++;
            } else {
               css_overflowed = True;
            }
            return;
         }
      } else {
         aspacem_assert(0);
      }
   }
}
static void remove_mapping_callback(Addr addr, SizeT len)
{
   // derived from sync_check_gap_callback()

   Int iLo, iHi, i;

   if (len == 0)
      return;

   /* The kernel should not give us wraparounds. */
   aspacem_assert(addr <= addr + len - 1);

   iLo = find_nsegment_idx( addr );
   iHi = find_nsegment_idx( addr + len - 1 );

   /* NSegments iLo .. iHi inclusive should agree with the presented data. */
   for (i = iLo; i <= iHi; i++) {
      if (nsegments[i].kind != SkFree && nsegments[i].kind != SkResvn) {
         /* V has a mapping, kernel doesn't.  Add to css_local[],
            directives to chop off the part of the V mapping that
            falls within the gap that the kernel tells us is
            present. */
         ChangedSeg* cs = &css_local[css_used_local];
         if (css_used_local < css_size_local) {
            cs->is_added = False;
            cs->start    = Addr__max(nsegments[i].start, addr);
            cs->end      = Addr__min(nsegments[i].end,   addr + len - 1);
            aspacem_assert(VG_IS_PAGE_ALIGNED(cs->start));
            aspacem_assert(VG_IS_PAGE_ALIGNED(cs->end+1));
            /* I don't think the following should fail.  But if it
               does, just omit the css_used_local++ in the cases where
               it doesn't hold. */
            aspacem_assert(cs->start < cs->end);
            cs->prot   = 0;
            cs->offset = 0;
            css_used_local++;
         } else {
            css_overflowed = True;
         }
      }
   }
}
// Returns False if 'css' wasn't big enough.
Bool VG_(get_changed_segments)(
      const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
      Int css_size, /*OUT*/Int* css_used)
{
   static UInt stats_synccalls = 1;
   aspacem_assert(when && where);

   if (0)
      VG_(debugLog)(0,"aspacem",
         "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
         stats_synccalls++, stats_machcalls, when, where
      );

   css_overflowed = False;
   css_local      = css;
   css_size_local = css_size;
   css_used_local = 0;

   // Get the list of segs that need to be added/removed.
   parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);

   *css_used = css_used_local;

   if (css_overflowed) {
      aspacem_assert(css_used_local == css_size_local);
   }

   return !css_overflowed;
}

#endif // defined(VGO_darwin)

/*------END-procmaps-parser-for-Darwin---------------------------*/
/*------BEGIN-procmaps-parser-for-Solaris------------------------*/

#if defined(VGO_solaris)

/* Note: /proc/self/xmap contains extended information about already
   materialized mappings whereas /proc/self/rmap contains information about
   all mappings including reserved but yet-to-materialize mappings (mmap'ed
   with MAP_NORESERVE flag, such as thread stacks).  But /proc/self/rmap does
   not contain extended information found in /proc/self/xmap.  Therefore
   information from both sources needs to be combined.
 */

typedef struct
{
   Addr   addr;
   SizeT  size;
   UInt   prot;
   ULong  dev;
   ULong  ino;
   Off64T foffset;
   HChar  filename[VKI_PATH_MAX];
} Mapping;

static SizeT read_proc_file(const HChar *filename, HChar *buf,
                            SizeT buf_size, const HChar *buf_size_name,
                            SizeT entry_size)
{
   SysRes res = ML_(am_open)(filename, VKI_O_RDONLY, 0);
   if (sr_isError(res)) {
      HChar message[100];
      ML_(am_sprintf)(message, "Cannot open %s.", filename);
      ML_(am_barf)(message);
   }
   Int fd = sr_Res(res);

   Int r = ML_(am_read)(fd, buf, buf_size);
   ML_(am_close)(fd);
   if (r < 0) {
      HChar message[100];
      ML_(am_sprintf)(message, "I/O error on %s.", filename);
      ML_(am_barf)(message);
   }

   if (r >= buf_size)
      ML_(am_barf_toolow)(buf_size_name);

   if (r % entry_size != 0) {
      HChar message[100];
      ML_(am_sprintf)(message, "Bogus values read from %s.", filename);
      ML_(am_barf)(message);
   }

   return r / entry_size;
}
*next_xmap(const HChar
*buffer
, SizeT entries
, SizeT
*idx
,
3862 aspacem_assert(idx
);
3863 aspacem_assert(mapping
);
3865 if (*idx
>= entries
)
3866 return NULL
; /* No more entries */
3868 const vki_prxmap_t
*map
= (const vki_prxmap_t
*)buffer
+ *idx
;
3870 mapping
->addr
= map
->pr_vaddr
;
3871 mapping
->size
= map
->pr_size
;
3874 if (map
->pr_mflags
& VKI_MA_READ
)
3875 mapping
->prot
|= VKI_PROT_READ
;
3876 if (map
->pr_mflags
& VKI_MA_WRITE
)
3877 mapping
->prot
|= VKI_PROT_WRITE
;
3878 if (map
->pr_mflags
& VKI_MA_EXEC
)
3879 mapping
->prot
|= VKI_PROT_EXEC
;
3881 if (map
->pr_dev
!= VKI_PRNODEV
) {
3882 mapping
->dev
= map
->pr_dev
;
3883 mapping
->ino
= map
->pr_ino
;
3884 mapping
->foffset
= map
->pr_offset
;
3889 mapping
->foffset
= 0;
3892 /* Try to get the filename. */
3893 mapping
->filename
[0] = '\0';
3894 if (map
->pr_mapname
[0] != '\0') {
3895 ML_(am_sprintf
)(mapping
->filename
, "/proc/self/path/%s",
3897 Int r
= ML_(am_readlink
)(mapping
->filename
, mapping
->filename
,
3898 sizeof(mapping
->filename
) - 1);
3900 /* If Valgrind is executed in a non-global zone and the link in
3901 /proc/self/path/ represents a file that is available through lofs
3902 from a global zone then the kernel may not be able to resolve the
3905 In such a case, return a corresponding /proc/self/object/ file to
3906 allow Valgrind to read the file if it is necessary.
3908 This can create some discrepancy for the sanity check. For
3909 instance, if a client program mmaps some file then the address
3910 space manager will have a correct zone-local name of that file,
3911 but the sanity check will receive a different file name from this
3912 code. This currently does not represent a problem because the
3913 sanity check ignores the file names (it uses device and inode
3914 numbers for the comparison).
3916 ML_(am_sprintf
)(mapping
->filename
, "/proc/self/object/%s",
3920 aspacem_assert(r
>= 0);
3921 mapping
->filename
[r
] = '\0';
3929 static Mapping
*next_rmap(const HChar
*buffer
, SizeT entries
, SizeT
*idx
,
3932 aspacem_assert(idx
);
3933 aspacem_assert(mapping
);
3935 if (*idx
>= entries
)
3936 return NULL
; /* No more entries */
3938 const vki_prmap_t
*map
= (const vki_prmap_t
*)buffer
+ *idx
;
3940 mapping
->addr
= map
->pr_vaddr
;
3941 mapping
->size
= map
->pr_size
;
3944 if (map
->pr_mflags
& VKI_MA_READ
)
3945 mapping
->prot
|= VKI_PROT_READ
;
3946 if (map
->pr_mflags
& VKI_MA_WRITE
)
3947 mapping
->prot
|= VKI_PROT_WRITE
;
3948 if (map
->pr_mflags
& VKI_MA_EXEC
)
3949 mapping
->prot
|= VKI_PROT_EXEC
;
3953 mapping
->foffset
= 0;
3954 mapping
->filename
[0] = '\0';
3960 /* Used for two purposes:
3961 1. Establish initial mappings upon the process startup
3962 2. Check mappings during aspacemgr sanity check
3964 static void parse_procselfmaps (
3965 void (*record_mapping
)( Addr addr
, SizeT len
, UInt prot
,
3966 ULong dev
, ULong ino
, Off64T offset
,
3967 const HChar
*filename
),
3968 void (*record_gap
)( Addr addr
, SizeT len
)
3971 Addr start
= Addr_MIN
;
3972 Addr gap_start
= Addr_MIN
;
3974 #define M_XMAP_BUF (VG_N_SEGMENTS * sizeof(vki_prxmap_t))
3975 /* Static to keep it out of stack frame... */
3976 static HChar xmap_buf
[M_XMAP_BUF
];
3977 const Mapping
*xmap
= NULL
;
3978 SizeT xmap_index
= 0; /* Current entry */
3980 Mapping xmap_mapping
;
3983 #define M_RMAP_BUF (VG_N_SEGMENTS * sizeof(vki_prmap_t))
3984 static HChar rmap_buf
[M_RMAP_BUF
];
3985 const Mapping
*rmap
= NULL
;
3986 SizeT rmap_index
= 0; /* Current entry */
3988 Mapping rmap_mapping
;
3991 /* Read fully /proc/self/xmap and /proc/self/rmap. */
3992 xmap_entries
= read_proc_file("/proc/self/xmap", xmap_buf
, M_XMAP_BUF
,
3993 "M_XMAP_BUF", sizeof(vki_prxmap_t
));
3995 rmap_entries
= read_proc_file("/proc/self/rmap", rmap_buf
, M_RMAP_BUF
,
3996 "M_RMAP_BUF", sizeof(vki_prmap_t
));
3998 /* Get the first xmap and rmap. */
3999 advance_xmap
= True
;
4000 advance_rmap
= True
;
4003 /* Get next xmap or rmap if necessary. */
4005 xmap
= next_xmap(xmap_buf
, xmap_entries
, &xmap_index
, &xmap_mapping
);
4006 advance_xmap
= False
;
4009 rmap
= next_rmap(rmap_buf
, rmap_entries
, &rmap_index
, &rmap_mapping
);
4010 advance_rmap
= False
;
4013 /* Check if the end has been reached. */
4019 aspacem_assert(start
<= xmap
->addr
);
4020 aspacem_assert(rmap
->addr
<= xmap
->addr
);
4023 if (xmap
!= NULL
&& start
== xmap
->addr
) {
4024 /* xmap mapping reached. */
4025 aspacem_assert(xmap
->addr
>= rmap
->addr
&&
4026 xmap
->addr
+ xmap
->size
<= rmap
->addr
+ rmap
->size
);
4027 aspacem_assert(xmap
->prot
== rmap
->prot
);
4029 if (record_mapping
!= NULL
)
4030 (*record_mapping
)(xmap
->addr
, xmap
->size
, xmap
->prot
, xmap
->dev
,
4031 xmap
->ino
, xmap
->foffset
,
4032 (xmap
->filename
[0] != '\0') ?
4033 xmap
->filename
: NULL
);
4035 start
= xmap
->addr
+ xmap
->size
;
4036 advance_xmap
= True
;
4038 else if (start
>= rmap
->addr
) {
4039 /* Reserved-only part. */
4040 /* First calculate size until the end of this reserved mapping... */
4041 SizeT size
= rmap
->addr
+ rmap
->size
- start
;
4042 /* ... but shrink it if some xmap is in a way. */
4043 if (xmap
!= NULL
&& size
> xmap
->addr
- start
)
4044 size
= xmap
->addr
- start
;
4046 if (record_mapping
!= NULL
)
4047 (*record_mapping
)(start
, size
, rmap
->prot
, 0, 0, 0, NULL
);
4052 if (record_gap
!= NULL
&& gap_start
< start
)
4053 (*record_gap
)(gap_start
, start
- gap_start
);
4057 if (rmap
->addr
+ rmap
->size
<= start
)
4058 advance_rmap
= True
;
4063 if (record_gap
!= NULL
&& gap_start
< Addr_MAX
)
4064 (*record_gap
)(gap_start
, Addr_MAX
- gap_start
+ 1);
/* parse_procselfmaps() callbacks do not allow for easy thread safety. */
static Addr  found_addr;
static SizeT found_size;
static UInt  found_prot;

/* Reports a new mapping into variables above. */
static void new_segment_found_callback(Addr addr, SizeT len, UInt prot,
   ULong dev, ULong ino, Off64T offset, const HChar *filename)
{
   aspacem_assert(addr <= addr + len - 1);

   Int iLo = find_nsegment_idx(addr);
   Int iHi = find_nsegment_idx(addr + len - 1);
   aspacem_assert(iLo <= iHi);
   aspacem_assert(nsegments[iLo].start <= addr);
   aspacem_assert(nsegments[iHi].end   >= addr + len - 1);

   /* Do not perform any sanity checks.  That is done in other places.
      Just find if a reported mapping is found in aspacemgr's book keeping. */
   for (Int i = iLo; i <= iHi; i++) {
      if ((nsegments[i].kind == SkFree) || (nsegments[i].kind == SkResvn)) {
         found_addr = addr;
         found_size = len;
         found_prot = prot;
         break;
      }
   }
}

/* Returns True if a new segment was found. */
Bool VG_(am_search_for_new_segment)(Addr *addr, SizeT *size, UInt *prot)
{
   found_addr = 0;

   parse_procselfmaps(new_segment_found_callback, NULL);

   if (found_addr != 0) {
      *addr = found_addr;
      *size = found_size;
      *prot = found_prot;
      return True;
   } else {
      return False;
   }
}

#endif // defined(VGO_solaris)
/*------END-procmaps-parser-for-Solaris--------------------------*/

#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/