/*--------------------------------------------------------------------*/
/*--- The address space manager: stuff common to all platforms    ---*/
/*---                                                              ---*/
/*---                                         m_aspacemgr-common.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2006-2017 OpenWorks LLP

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

/* *************************************************************
   DO NOT INCLUDE ANY OTHER FILES HERE.
   ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
   AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
   ************************************************************* */

#include "priv_aspacemgr.h"
#include "pub_core_libcassert.h"


/*-----------------------------------------------------------------*/
/*---                                                            ---*/
/*--- Stuff to make aspacem almost completely independent of     ---*/
/*--- the rest of Valgrind.                                      ---*/
/*---                                                            ---*/
/*-----------------------------------------------------------------*/

//--------------------------------------------------------------
// Simple assert and assert-like fns, which avoid dependence on
// m_libcassert, and hence on the entire debug-info reader swamp

__attribute__ ((noreturn))
void ML_(am_exit)( Int status )
{
   VG_(exit_now) (status);
}

void ML_(am_barf) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem", "Valgrind: FATAL: %s\n", what);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_barf_toolow) ( const HChar* what )
{
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: %s is too low.\n", what);
   VG_(debugLog)(0, "aspacem", "  Increase it and rebuild.  "
                               "Exiting now.\n");
   ML_(am_exit)(1);
}

void ML_(am_assert_fail)( const HChar* expr,
                          const HChar* file,
                          Int line,
                          const HChar* fn )
{
   VG_(debugLog)(0, "aspacem",
                 "Valgrind: FATAL: aspacem assertion failed:\n");
   VG_(debugLog)(0, "aspacem", "  %s\n", expr);
   VG_(debugLog)(0, "aspacem", "  at %s:%d (%s)\n", file, line, fn);
   VG_(debugLog)(0, "aspacem", "Exiting now.\n");
   ML_(am_exit)(1);
}

Int ML_(am_getpid)( void )
{
   SysRes sres = VG_(do_syscall0)(__NR_getpid);
   aspacem_assert(!sr_isError(sres));
   return sr_Res(sres);
}


//--------------------------------------------------------------
// A simple sprintf implementation, so as to avoid dependence on
// m_libcprint.

static void local_add_to_aspacem_sprintf_buf ( HChar c, void *p )
{
   HChar** aspacem_sprintf_ptr = p;
   *(*aspacem_sprintf_ptr)++ = c;
}
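
// Note: local_add_to_aspacem_sprintf_buf is the character-emit callback
// handed to VG_(debugLog_vprintf) below; it simply appends each character
// to the caller-supplied buffer via a pointer-to-pointer cursor.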

static
UInt local_vsprintf ( HChar* buf, const HChar *format, va_list vargs )
{
   UInt ret;
   HChar *aspacem_sprintf_ptr = buf;

   ret = VG_(debugLog_vprintf)
            ( local_add_to_aspacem_sprintf_buf,
              &aspacem_sprintf_ptr, format, vargs );
   local_add_to_aspacem_sprintf_buf('\0', &aspacem_sprintf_ptr);

   return ret;
}

UInt ML_(am_sprintf) ( HChar* buf, const HChar *format, ... )
{
   UInt ret;
   va_list vargs;

   va_start(vargs, format);
   ret = local_vsprintf(buf, format, vargs);
   va_end(vargs);

   return ret;
}
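
// Caution: as with the libc sprintf there is no bounds checking here --
// the destination buffer must be large enough for the formatted text plus
// the trailing NUL.  Illustrative (hypothetical) use:
//
//    HChar tmp[64];
//    ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
//
// which is essentially how ML_(am_resolve_filename) uses it below.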


//--------------------------------------------------------------
// Direct access to a handful of syscalls.  This avoids dependence on
// m_libc*.  THESE DO NOT UPDATE THE aspacem-internal DATA
// STRUCTURES (SEGMENT ARRAY).  DO NOT USE THEM UNLESS YOU KNOW WHAT
// YOU ARE DOING.

/* --- Pertaining to mappings --- */

/* Note: this is VG_, not ML_. */
SysRes VG_(am_do_mmap_NO_NOTIFY)( Addr start, SizeT length, UInt prot,
                                  UInt flags, Int fd, Off64T offset)
{
   SysRes res;
   aspacem_assert(VG_IS_PAGE_ALIGNED(offset));

#  if defined(VGP_arm64_linux)
   res = VG_(do_syscall6)(__NR3264_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_linux) || defined(VGP_ppc32_linux) \
        || defined(VGP_arm_linux) || defined(VGP_nanomips_linux)
   /* mmap2 uses 4096 chunks even if actual page size is bigger. */
   aspacem_assert((offset % 4096) == 0);
   res = VG_(do_syscall6)(__NR_mmap2, (UWord)start, length,
                          prot, flags, fd, offset / 4096);
#  elif defined(VGP_amd64_linux) \
        || defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux) \
        || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
        || defined(VGP_mips64_linux) || defined(VGP_arm64_linux)
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_darwin)
   if (fd == 0  &&  (flags & VKI_MAP_ANONYMOUS)) {
      fd = -1;  // MAP_ANON with fd==0 is EINVAL
   }
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, (UInt)fd, offset);
#  elif defined(VGP_x86_freebsd)
   if (flags & VKI_MAP_ANONYMOUS && fd == 0)
      fd = -1;
   res = VG_(do_syscall7)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset, offset >> 32ul);
#  elif defined(VGP_amd64_freebsd) || defined(VGP_arm64_freebsd)
   if ((flags & VKI_MAP_ANONYMOUS) && fd == 0)
      fd = -1;
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                          prot, flags, fd, offset);
#  elif defined(VGP_x86_solaris)
   /* MAP_ANON with fd==0 is EINVAL. */
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS))
      fd = -1;
   res = VG_(do_syscall7)(__NR_mmap64, (UWord)start, length, prot, flags,
                          (UInt)fd, offset & 0xffffffff, offset >> 32);
#  elif defined(VGP_amd64_solaris)
   /* MAP_ANON with fd==0 is EINVAL. */
   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS))
      fd = -1;
   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length, prot, flags,
                          (UInt)fd, offset);
#  else
#    error Unknown platform
#  endif

   return res;
}
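
/* A raw mprotect wrapper.  Like the other calls in this section it does
   not update aspacem's segment array; callers such as
   VG_(am_alloc_VgStack) below must follow it with
   VG_(am_notify_mprotect) themselves. */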
static
SysRes local_do_mprotect_NO_NOTIFY(Addr start, SizeT length, UInt prot)
{
   return VG_(do_syscall3)(__NR_mprotect, (UWord)start, length, prot );
}

SysRes ML_(am_do_munmap_NO_NOTIFY)(Addr start, SizeT length)
{
   return VG_(do_syscall2)(__NR_munmap, (UWord)start, length );
}

/* The following are used only to implement mremap(). */

SysRes ML_(am_do_extend_mapping_NO_NOTIFY)(
          Addr  old_addr,
          SizeT old_len,
          SizeT new_len
       )
{
   /* Extend the mapping old_addr .. old_addr+old_len-1 to have length
      new_len, WITHOUT moving it.  If it can't be extended in place,
      fail. */
#  if defined(VGO_linux) || defined(VGO_solaris)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             0/*flags, meaning: must be at old_addr, else FAIL */,
             0/*new_addr, is ignored*/
          );
#  else
#    error Unknown OS
#  endif
}

SysRes ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)(
          Addr old_addr, Addr old_len,
          Addr new_addr, Addr new_len
       )
{
   /* Move the mapping old_addr .. old_addr+old_len-1 to the new
      location and with the new length.  Only needs to handle the case
      where the two areas do not overlap, neither length is zero, and
      all args are page aligned. */
#  if defined(VGO_linux) || defined(VGO_solaris)
   return VG_(do_syscall5)(
             __NR_mremap,
             old_addr, old_len, new_len,
             VKI_MREMAP_MAYMOVE|VKI_MREMAP_FIXED/*move-or-fail*/,
             new_addr
          );
#  else
#    error Unknown OS
#  endif
}


/* --- Pertaining to files --- */

SysRes ML_(am_open) ( const HChar* pathname, Int flags, Int mode )
{
#  if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
   /* ARM64 wants to use __NR_openat rather than __NR_open. */
   SysRes res = VG_(do_syscall4)(__NR_openat,
                                 VKI_AT_FDCWD, (UWord)pathname, flags, mode);
#  elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
   SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
#  elif defined(VGO_solaris)
   SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
                                 flags, mode);
#  else
#    error Unknown OS
#  endif
   return res;
}

Int ML_(am_read) ( Int fd, void* buf, Int count )
{
   SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
   return sr_isError(res) ? -1 : sr_Res(res);
}

void ML_(am_close) ( Int fd )
{
   (void)VG_(do_syscall1)(__NR_close, fd);
}

Int ML_(am_readlink)(const HChar* path, HChar* buf, UInt bufsiz)
{
   SysRes res;
#  if defined(VGP_arm64_linux) || defined(VGP_nanomips_linux)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
                          (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_freebsd)
   res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
#  elif defined(VGO_solaris)
   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
                          (UWord)buf, bufsiz);
#  else
#    error Unknown OS
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}
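
/* Note: as with readlink(2), the result placed in 'buf' is not
   NUL-terminated; callers here (see ML_(am_resolve_filename) below)
   zero the buffer before calling. */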

Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
{
#  if defined(VGO_linux) || defined(VGO_solaris) || defined(VGO_freebsd)
#  if defined(VGP_nanomips_linux)
   SysRes res = VG_(do_syscall3)(__NR_fcntl64, fd, cmd, arg);
#  else
   SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
#  endif
#  elif defined(VGO_darwin)
   SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
#  else
#    error "Unknown OS"
#  endif
   return sr_isError(res) ? -1 : sr_Res(res);
}
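
/* ML_(am_fcntl) is used below by ML_(am_resolve_filename): with
   VKI_F_KINFO on FreeBSD and VKI_F_GETPATH on Darwin. */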

/* Get the dev, inode and mode info for a file descriptor, if
   possible.  Returns True on success. */
Bool ML_(am_get_fd_d_i_m)( Int fd,
                           /*OUT*/ULong* dev,
                           /*OUT*/ULong* ino, /*OUT*/UInt* mode )
{
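   /* On Linux, try the richest source first and fall back in turn:
      statx, then fstat64 (where available), then plain fstat. */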
#  if defined(VGO_linux) || defined(VGO_darwin)
   SysRes res;
#  if defined(VGO_linux)
   /* First try with statx. */
   struct vki_statx bufx;
   const char* file_name = "";
   res = VG_(do_syscall5)(__NR_statx, fd, (RegWord)file_name,
                          VKI_AT_EMPTY_PATH, VKI_STATX_ALL, (RegWord)&bufx);
   if (!sr_isError(res)) {
      *dev  = VG_MAKEDEV(bufx.stx_dev_major, bufx.stx_dev_minor);
      *ino  = (ULong)bufx.stx_ino;
      *mode = (UInt) bufx.stx_mode;
      return True;
   }
#  endif // VGO_linux only

#  if defined(VGO_linux) && defined(__NR_fstat64)
   /* fstat64 is second candidate as it can cope with minor and major device
      numbers outside the 0-255 range and it works properly for x86
      binaries on amd64 systems where fstat seems to be broken. */
   struct vki_stat64 buf64;
   res = VG_(do_syscall2)(__NR_fstat64, fd, (UWord)&buf64);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
#  endif // VGO_linux and defined __NR_fstat64

#  if defined(__NR_fstat)
   struct vki_stat buf;
   res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
   if (!sr_isError(res)) {
      *dev  = (ULong)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
#  endif // defined __NR_fstat
   return False;

#  elif defined(VGO_solaris)
#  if defined(VGP_x86_solaris)
   struct vki_stat64 buf64;
   SysRes res = VG_(do_syscall4)(__NR_fstatat64, fd, 0, (UWord)&buf64, 0);
#  elif defined(VGP_amd64_solaris)
   struct vki_stat buf64;
   SysRes res = VG_(do_syscall4)(__NR_fstatat, fd, 0, (UWord)&buf64, 0);
#  else
#    error "Unknown platform"
#  endif
   if (!sr_isError(res)) {
      *dev  = (ULong)buf64.st_dev;
      *ino  = (ULong)buf64.st_ino;
      *mode = (UInt) buf64.st_mode;
      return True;
   }
   return False;

#  elif defined(VGO_freebsd)
#if (FREEBSD_VERS < FREEBSD_12)
   struct vki_freebsd11_stat buf;
   SysRes res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
#else
   struct vki_stat buf;
   SysRes res = VG_(do_syscall2)(__NR_fstat, fd, (UWord)&buf);
#endif
   if (!sr_isError(res)) {
      /*
       * This gets compared to the value obtained by sysctl KERN_PROC_VMMAP.
       * For some reason that only uses 32 bits, so truncate this to match.
       */
      *dev  = (UInt)buf.st_dev;
      *ino  = (ULong)buf.st_ino;
      *mode = (UInt) buf.st_mode;
      return True;
   }
   return False;
#  else
#    error Unknown OS
#  endif
}

#if defined(VGO_freebsd)
#define M_FILEDESC_BUF  1000000
static Char filedesc_buf[M_FILEDESC_BUF];
#endif
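
/* filedesc_buf receives the raw kern.proc.filedesc sysctl output, which
   ML_(am_resolve_filename) below walks to find the entry for a given fd. */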

Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
{
#if defined(VGO_linux)
   Int i;
   HChar tmp[64];    // large enough
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/fd/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#elif defined(VGO_freebsd)
#if (1)
   Int mib[4];
   SysRes sres;
   SizeT len;
   Char *bp, *eb;
   struct vki_kinfo_file *kf;

   mib[0] = VKI_CTL_KERN;
   mib[1] = VKI_KERN_PROC;
   mib[2] = VKI_KERN_PROC_FILEDESC;
   mib[3] = sr_Res(VG_(do_syscall0)(__NR_getpid));
   len = sizeof(filedesc_buf);
   sres = VG_(do_syscall6)(__NR___sysctl, (UWord)mib, 4, (UWord)filedesc_buf,
                           (UWord)&len, 0, 0);
   if (sr_isError(sres)) {
      VG_(debugLog)(0, "sysctl(kern.proc.filedesc)", "%s\n",
                    VG_(strerror)(sr_Err(sres)));
      ML_(am_exit)(1);
   }
   /* Walk through the list. */
   bp = filedesc_buf;
   eb = filedesc_buf + len;
   while (bp < eb) {
      kf = (struct vki_kinfo_file *)bp;
      if (kf->vki_kf_fd == fd)
         break;
      bp += kf->vki_kf_structsize;
   }
   if (bp >= eb || *kf->vki_kf_path == '\0')
      VG_(strncpy)( buf, "[unknown]", nbuf );
   else
      VG_(strncpy)( buf, kf->vki_kf_path, nbuf );
   return True;
#else
   // PJF it will be a relief to get rid of the above bit of ugliness
   struct vki_kinfo_file kinfo_file;
   kinfo_file.vki_kf_structsize = VKI_KINFO_FILE_SIZE;
   if (0 == ML_(am_fcntl) ( fd, VKI_F_KINFO, (Addr)&kinfo_file )) {
      VG_(strncpy)( buf, kinfo_file.vki_kf_path,
                    nbuf < VKI_PATH_MAX ? nbuf : VKI_PATH_MAX );
      if (buf[0] == '/') return True;
   }
   return False;
#endif

#elif defined(VGO_darwin)
   HChar tmp[VKI_MAXPATHLEN+1];
   if (0 == ML_(am_fcntl)(fd, VKI_F_GETPATH, (UWord)tmp)) {
      if (nbuf > 0) {
         VG_(strncpy)( buf, tmp, nbuf < sizeof(tmp) ? nbuf : sizeof(tmp) );
         buf[nbuf-1] = 0;
      }
      if (tmp[0] == '/') return True;
   }
   return False;

#elif defined(VGO_solaris)
   Int i;
   HChar tmp[64];    // large enough
   for (i = 0; i < nbuf; i++) buf[i] = 0;
   ML_(am_sprintf)(tmp, "/proc/self/path/%d", fd);
   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
      return True;
   else
      return False;

#else
#  error Unknown OS
#endif
}


/*-----------------------------------------------------------------*/
/*---                                                            ---*/
/*--- Manage stacks for Valgrind itself.                         ---*/
/*---                                                            ---*/
/*-----------------------------------------------------------------*/

// We use a fake size of 1.  A bigger size is allocated
// by VG_(am_alloc_VgStack).

/* Allocate and initialise a VgStack (anonymous valgrind space).
   Protect the stack active area and the guard areas appropriately.
   Returns NULL on failure, else the address of the bottom of the
   stack.  On success, also sets *initial_sp to what the stack pointer
   should be set to. */

VgStack* VG_(am_alloc_VgStack)( /*OUT*/Addr* initial_sp )
{
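   /* The returned block is laid out as
        [ guard area | active area | guard area ]
      with both guard regions made PROT_NONE and the active area filled
      with 0xDEADBEEF so that VG_(am_get_VgStack_unused_szB) can later
      estimate how much of it was actually used. */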
   Int      szB;
   SysRes   sres;
   VgStack* stack;
   UInt*    p;
   Int      i;

   /* Allocate the stack. */
   szB = VG_STACK_GUARD_SZB
         + VG_(clo_valgrind_stacksize) + VG_STACK_GUARD_SZB;

   sres = VG_(am_mmap_anon_float_valgrind)( szB );
   if (sr_isError(sres))
      return NULL;

   stack = (VgStack*)(Addr)sr_Res(sres);

   aspacem_assert(VG_IS_PAGE_ALIGNED(szB));
   aspacem_assert(VG_IS_PAGE_ALIGNED(stack));

   /* Protect the guard areas. */
   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack[0],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[0],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   sres = local_do_mprotect_NO_NOTIFY(
             (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
             VG_STACK_GUARD_SZB, VKI_PROT_NONE
          );
   if (sr_isError(sres)) goto protect_failed;
   VG_(am_notify_mprotect)(
      (Addr) &stack->bytes[VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)],
      VG_STACK_GUARD_SZB, VKI_PROT_NONE
   );

   /* Looks good.  Fill the active area with junk so we can later
      tell how much got used. */

   p = (UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++)
      p[i] = 0xDEADBEEF;

   *initial_sp = (Addr)&stack->bytes
                       [VG_STACK_GUARD_SZB + VG_(clo_valgrind_stacksize)];
   *initial_sp -= 8;
   *initial_sp &= ~((Addr)0x1F); /* 32-align it */

   VG_(debugLog)( 1,"aspacem",
                  "allocated valgrind thread stack at 0x%llx size %d\n",
                  (ULong)(Addr)stack, szB);
   ML_(am_do_sanity_check)();
   return stack;

  protect_failed:
   /* The stack was allocated, but we can't protect it.  Unmap it and
      return NULL (failure). */
   (void)ML_(am_do_munmap_NO_NOTIFY)( (Addr)stack, szB );
   ML_(am_do_sanity_check)();
   return NULL;
}

/* Figure out how many bytes of the stack's active area have not
   been used.  Used for estimating if we are close to overflowing it. */

SizeT VG_(am_get_VgStack_unused_szB)( const VgStack* stack, SizeT limit )
{
   SizeT i;
   const UInt* p;

   p = (const UInt*)&stack->bytes[VG_STACK_GUARD_SZB];
   for (i = 0; i < VG_(clo_valgrind_stacksize)/sizeof(UInt); i++) {
      if (p[i] != 0xDEADBEEF)
         break;
      if (i * sizeof(UInt) >= limit)
         break;
   }

   return i * sizeof(UInt);
}

Addr VG_(am_valgrind_stack_low_addr)( const VgStack* stack )
{
   return (Addr)&stack->bytes[VG_STACK_GUARD_SZB];
}

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/