2 /*--------------------------------------------------------------------*/
3 /*--- The address space manager: stuff common to all platforms ---*/
5 /*--- m_aspacemgr-common.c ---*/
6 /*--------------------------------------------------------------------*/
9 This file is part of Valgrind, a dynamic binary instrumentation
12 Copyright (C) 2006-2017 OpenWorks LLP
15 This program is free software; you can redistribute it and/or
16 modify it under the terms of the GNU General Public License as
17 published by the Free Software Foundation; either version 2 of the
18 License, or (at your option) any later version.
20 This program is distributed in the hope that it will be useful, but
21 WITHOUT ANY WARRANTY; without even the implied warranty of
22 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 General Public License for more details.
25 You should have received a copy of the GNU General Public License
26 along with this program; if not, write to the Free Software
27 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
30 The GNU General Public License is contained in the file COPYING.
33 /* *************************************************************
34 DO NOT INCLUDE ANY OTHER FILES HERE.
35 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
36 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
37 ************************************************************* */
39 #include "priv_aspacemgr.h"
43 /*-----------------------------------------------------------------*/
45 /*--- Stuff to make aspacem almost completely independent of ---*/
46 /*--- the rest of Valgrind. ---*/
48 /*-----------------------------------------------------------------*/
50 //--------------------------------------------------------------
51 // Simple assert and assert-like fns, which avoid dependence on
52 // m_libcassert, and hence on the entire debug-info reader swamp
/* Terminate the entire process immediately with the given exit status.
   Bypasses m_libcassert so aspacem stays independent of the rest of
   Valgrind.  Never returns. */
__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
   VG_(exit_now) (status);
}
/* Report a fatal aspacem error on the debug log and exit(1).
   'what' is a short human-readable description of the failure. */
void ML_(am_barf) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}
/* Report that a compile-time limit named by 'what' is too small for
   the current run, advise rebuilding with a larger value, and exit(1). */
void ML_(am_barf_toolow) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem", 
                 "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   ML_(am_exit)(1);
}
/* Assertion-failure handler used by aspacem_assert: log the failed
   expression together with its source location (file:line, function)
   and exit(1).  Avoids depending on m_libcassert. */
void ML_(am_assert_fail)( const HChar* expr,
                          const HChar* file,
                          Int line, 
                          const HChar* fn )
{
   VG_(debugLog)(0, "aspacem", 
                 "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", "  %s\n", expr);
   VG_(debugLog)(0, "aspacem", "  at %s:%d (%s)\n", file, line, fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}
/* Return the pid of this process via a direct getpid syscall
   (no m_libc* dependence).  Asserts the syscall cannot fail. */
Int ML_(am_getpid)( void )
{
   SysRes sres = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sres));
   return sr_Res(sres);
}
97 //--------------------------------------------------------------
98 // A simple sprintf implementation, so as to avoid dependence on
/* Emitter callback for VG_(debugLog_vprintf): append one character at
   the cursor *p (a HChar**) and advance the cursor.  No bounds check —
   callers must supply a large-enough buffer. */
static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}
/* Minimal vsprintf built on VG_(debugLog_vprintf), so aspacem need not
   depend on m_libcprint.  Formats 'format'/'vargs' into 'buf' and
   NUL-terminates it.  Returns the number of characters written,
   excluding the terminating NUL. */
static
UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   Int ret;
   HChar *aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( local_add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   /* Terminate the string; the terminator is not counted in 'ret'. */
   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}
/* sprintf-alike for aspacem-internal use.  Formats into 'buf' (which
   must be large enough — no bounds checking) and returns the number of
   characters written, excluding the terminating NUL. */
UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs,format);
   ret = local_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
134 //--------------------------------------------------------------
135 // Direct access to a handful of syscalls. This avoids dependence on
136 // m_libc*. THESE DO NOT UPDATE THE aspacem-internal DATA
137 // STRUCTURES (SEGMENT ARRAY). DO NOT USE THEM UNLESS YOU KNOW WHAT
140 /* --- Pertaining to mappings --- */
142 /* Note: this is VG_, not ML_. */
143 SysRes
VG_(am_do_mmap_NO_NOTIFY
)( Addr start
, SizeT length
, UInt prot
,
144 UInt flags
, Int fd
, Off64T offset
)
147 aspacem_assert(VG_IS_PAGE_ALIGNED(offset
));
149 # if defined(VGP_arm64_linux)
150 res
= VG_(do_syscall6
)(__NR3264_mmap
, (UWord
)start
, length
,
151 prot
, flags
, fd
, offset
);
152 # elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
153 || defined(VGP_arm_linux)
154 /* mmap2 uses 4096 chunks even if actual page size is bigger. */
155 aspacem_assert((offset
% 4096) == 0);
156 res
= VG_(do_syscall6
)(__NR_mmap2
, (UWord
)start
, length
,
157 prot
, flags
, fd
, offset
/ 4096);
158 # elif defined(VGP_amd64_linux) \
159 || defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
160 || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
161 || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
162 res
= VG_(do_syscall6
)(__NR_mmap
, (UWord
)start
, length
,
163 prot
, flags
, fd
, offset
);
164 # elif defined(VGP_x86_darwin)
165 if (fd
== 0 && (flags
& VKI_MAP_ANONYMOUS
)) {
166 fd
= -1; // MAP_ANON with fd==0 is EINVAL
168 res
= VG_(do_syscall7
)(__NR_mmap
, (UWord
)start
, length
,
169 prot
, flags
, fd
, offset
& 0xffffffff, offset
>> 32);
170 # elif defined(VGP_amd64_darwin)
171 if (fd
== 0 && (flags
& VKI_MAP_ANONYMOUS
)) {
172 fd
= -1; // MAP_ANON with fd==0 is EINVAL
174 res
= VG_(do_syscall6
)(__NR_mmap
, (UWord
)start
, length
,
175 prot
, flags
, (UInt
)fd
, offset
);
176 # elif defined(VGP_x86_solaris)
177 /* MAP_ANON with fd==0 is EINVAL. */
178 if (fd
== 0 && (flags
& VKI_MAP_ANONYMOUS
))
180 res
= VG_(do_syscall7
)(__NR_mmap64
, (UWord
)start
, length
, prot
, flags
,
181 (UInt
)fd
, offset
& 0xffffffff, offset
>> 32);
182 # elif defined(VGP_amd64_solaris)
183 /* MAP_ANON with fd==0 is EINVAL. */
184 if (fd
== 0 && (flags
& VKI_MAP_ANONYMOUS
))
186 res
= VG_(do_syscall6
)(__NR_mmap
, (UWord
)start
, length
, prot
, flags
,
189 # error Unknown platform
/* Raw mprotect on [start, start+length), WITHOUT updating the aspacem
   segment array.  Returns the syscall result unchanged. */
static SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}
/* Raw munmap on [start, start+length), WITHOUT updating the aspacem
   segment array.  Returns the syscall result unchanged. */
SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}
206 /* The following are used only to implement mremap(). */
/* Raw mremap used to grow a mapping in place.  Linux-only. */
SysRes ML_(am_do_extend_mapping_NO_NOTIFY)( 
          Addr  old_addr,
          SizeT old_len,
          SizeT new_len 
       )
{
   /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
      new_len, WITHOUT moving it.  If it can't be extended in place,
      fail. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap, 
             old_addr, old_len, new_len, 
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
#  else
#    error Unknown OS
#  endif
}
/* Raw mremap used to move a mapping to a fixed new address.  Linux-only. */
SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)( 
          Addr old_addr, Addr old_len, 
          Addr new_addr, Addr new_len 
       )
{
   /* Move the mapping old_addr .. old_addr+old_len-1 to the new
      location and with the new length.  Only needs to handle the case
      where the two areas do not overlap, neither length is zero, and
      all args are page aligned. */
#  if defined(VGO_linux)
   return VG_(do_syscall5)(
             __NR_mremap, 
             old_addr, old_len, new_len, 
             /* MAYMOVE|FIXED together mean: move to exactly new_addr,
                or fail — never pick some other address. */
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/, 
             new_addr
          );
#  else
#    error Unknown OS
#  endif
}
252 /* --- Pertaining to files --- */
/* Raw open(2) (or openat(2) where open is unavailable).  Returns the
   syscall result unchanged: an fd on success, error SysRes on failure. */
SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
#  if defined(VGP_arm64_linux)
   /* ARM64 wants to use __NR_openat rather than __NR_open. */
   SysRes res = VG_(do_syscall4)(__NR_openat,
                                 VKI_AT_FDCWD, (UWord)pathname, flags, mode);
#  elif defined(VGO_linux) || defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
#  elif defined(VGO_solaris)
   /* Solaris likewise only provides openat. */
   SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
                                 flags, mode);
#  else
#    error Unknown OS
#  endif
   return res;
}
/* Raw read(2).  Returns the number of bytes read, or -1 on error
   (the specific error code is discarded). */
Int ML_(am_read) ( Int fd, void* buf, Int count)
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return sr_isError(res) ? -1 : sr_Res(res);
}
/* Raw close(2).  The result is deliberately ignored — there is
   nothing useful to do if close fails here. */
void ML_(am_close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}
/* Raw readlink(2) (readlinkat where plain readlink is unavailable).
   Returns the number of bytes placed in buf, or -1 on error.  NB: as
   with readlink(2), buf is NOT NUL-terminated. */
Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
                          (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGO_linux) || defined(VGO_darwin)
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGO_solaris)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
                          (UWord)buf, bufsiz);
#  else
#    error Unknown OS
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}
/* Raw fcntl(2).  Returns the syscall's result, or -1 on error.
   Darwin routes through the _nocancel variant to avoid the
   cancellation machinery. */
Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
#  if defined(VGO_linux) || defined(VGO_solaris)
   SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
#  elif defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
#  else
#    error "Unknown OS"
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}
/* Get the dev, inode and mode info for a file descriptor, if
   possible.  Returns True on success.  On failure the OUT params are
   left untouched. */
Bool ML_(am_get_fd_d_i_m)( Int fd, 
                           /*OUT*/ULong* dev, 
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
#  if defined(VGO_linux) || defined(VGO_darwin)
   SysRes          res;
   struct vki_stat buf;
#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* Try fstat64 first as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif
   /* Fall back to plain fstat. */
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
#  elif defined(VGO_solaris)
#  if defined(VGP_x86_solaris)
   struct vki_stat64 buf64;
   SysRes res = VG_(do_syscall4)(__NR_fstatat64, fd, 0, (UWord)&buf64, 0);
#  elif defined(VGP_amd64_solaris)
   struct vki_stat buf64;
   SysRes res = VG_(do_syscall4)(__NR_fstatat, fd, 0, (UWord)&buf64, 0);
#  else
#    error "Unknown platform"
#  endif
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
   return False;
#  else
#    error Unknown OS
#  endif
}
/* Try to recover the pathname backing file descriptor 'fd' into
   buf[0..nbuf-1].  Returns True only when an absolute path (leading
   '/') was obtained.  Linux/Solaris read the /proc self-fd symlink;
   Darwin uses fcntl(F_GETPATH). */
Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
#if defined(VGO_linux)
   Int i;
   HChar tmp[64];    // large enough
   /* Pre-zero buf so the readlink result is NUL-terminated. */
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#elif defined(VGO_darwin)
   HChar tmp[VKI_MAXPATHLEN+1];
   if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
      if (nbuf > 0) {
         VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
         /* Force termination in case tmp was longer than buf. */
         buf[nbuf-1] = 0;
      }
      if (tmp[0] == '/') return True;
   }
   return False;

#elif defined(VGO_solaris)
   Int i;
   HChar tmp[64];
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/path/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#  else
#    error Unknown OS
#  endif
}
404 /*-----------------------------------------------------------------*/
406 /*--- Manage stacks for Valgrind itself. ---*/
408 /*-----------------------------------------------------------------*/
411 // We use a fake size of 1. A bigger size is allocated
412 // by VG_(am_alloc_VgStack).
415 /* Allocate and initialise a VgStack (anonymous valgrind space).
416 Protect the stack active area and the guard areas appropriately.
417 Returns NULL on failure, else the address of the bottom of the
418 stack. On success, also sets *initial_sp to what the stack pointer
/* Allocate and initialise a VgStack (anonymous valgrind space).
   Protect the stack active area and the guard areas appropriately.
   Returns NULL on failure, else the address of the bottom of the
   stack.  On success, also sets *initial_sp to what the stack pointer
   should initially be set to (a 32-byte aligned address just below the
   top guard page). */
VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack: one guard region below, the active area,
      one guard region above. */
   szB = VG_STACK_GUARD_SZB 
         + VG_(clo_valgrind_stacksize) + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(Addr)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas.  Each mprotect is paired with an
      am_notify_mprotect so the segment array stays in sync. */
   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack->bytes[0], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[0], 
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   sres = local_do_mprotect_NO_NOTIFY( 
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)], 
             VG_STACK_GUARD_SZB, VKI_PROT_NONE 
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)( 
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE 
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   /* Initial SP: top of the active area, stepped back slightly and
      32-byte aligned. */
   *initial_sp = (Addr)&stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem",
                  "allocated valgrind thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}
/* Figure out how many bytes of the stack's active area have not
   been used.  Used for estimating if we are close to overflowing it.
   Scans the 0xDEADBEEF fill words from the bottom of the active area
   upward; stops early once 'limit' bytes have been confirmed unused,
   so the scan cost is bounded by the caller's interest. */
SizeT VG_(am_get_VgStack_unused_szB)( const VgStack* stack, SizeT limit )
{
   SizeT i;
   const UInt* p;

   p = (const UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++) {
      /* First overwritten word marks the high-water mark. */
      if (p[i] != 0xDEADBEEF)
         break;
      if (i * sizeof(UInt) >= limit)
         break;
   }

   return i * sizeof(UInt);
}
509 /*--------------------------------------------------------------------*/
511 /*--------------------------------------------------------------------*/