/*--------------------------------------------------------------------*/
/*--- Implementation of POSIX signals.                 m_signals.c ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2013 Julian Seward
      jseward@acm.org

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
/*
   Signal handling.

   There are 4 distinct classes of signal:

   1. Synchronous, instruction-generated (SIGILL, FPE, BUS, SEGV and
   TRAP): these are signals as a result of an instruction fault.  If
   we get one while running client code, then we just do the
   appropriate thing.  If it happens while running Valgrind code, then
   it indicates a Valgrind bug.  Note that we "manually" implement
   automatic stack growth, such that if a fault happens near the
   client process stack, it is extended in the same way the kernel
   would, and the fault is never reported to the client program.

   2. Asynchronous variants of the above signals: If the kernel tries
   to deliver a sync signal while it is blocked, it just kills the
   process.  Therefore, we can't block those signals if we want to be
   able to report on bugs in Valgrind.  This means that we're also
   open to receiving those signals from other processes, sent with
   kill.  We could get away with just dropping them, since they aren't
   really signals that processes send to each other.

   3. Synchronous, general signals.  If a thread/process sends itself
   a signal with kill, it's expected to be synchronous: ie, the signal
   will have been delivered by the time the syscall finishes.

   4. Asynchronous, general signals.  All other signals, sent by
   another process with kill.  These are generally blocked, except for
   two special cases: we poll for them each time we're about to run a
   thread for a time quantum, and while running blocking syscalls.

   In addition, we reserve one signal for internal use: SIGVGKILL.
   SIGVGKILL is used to terminate threads.  When one thread wants
   another to exit, it will set its exitreason and send it SIGVGKILL
   if it appears to be blocked in a syscall.
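
   As an illustrative sketch of that handshake (pseudocode, not a
   quote of the real code, which lives in the scheduler/syswrap
   machinery):

      victim->exitreason = VgSrc_ExitThread;  -- or another VgSrc_ reason
      if (victim appears blocked in a syscall)
         send it SIGVGKILL                    -- kicks it out of the kernel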

   We use a kernel thread for each application thread.  When the
   thread allows itself to be open to signals, it sets the thread
   signal mask to what the client application set it to.  This means
   that we get the kernel to do all signal routing: under Valgrind,
   signals get delivered in the same way as in the non-Valgrind case
   (the exception being for the sync signal set, since they're almost
   always unblocked).

   First off, we take note of the client's requests (via sys_sigaction
   and sys_sigprocmask) to set the signal state (handlers for each
   signal, which are process-wide, + a mask for each signal, which is
   per-thread).  This info is duly recorded in the SCSS (static Client
   signal state) in m_signals.c, and if the client later queries what
   the state is, we merely fish the relevant info out of SCSS and give
   it back to the client.

   However, we set the real signal state in the kernel to something
   entirely different.  This is recorded in SKSS, the static Kernel
   signal state.  What's nice (to the extent that anything is nice
   w.r.t signals) is that there's a pure function to calculate SKSS
   from SCSS, calculate_SKSS_from_SCSS.  So when the client changes
   SCSS then we recompute the associated SKSS and apply any changes
   from the previous SKSS through to the kernel.
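
   Schematically (a sketch; the real version is handle_SCSS_change,
   later in this file):

      skss_old = skss;
      calculate_SKSS_from_SCSS( &skss );
      for each signal sig
         if skss.skss_per_sig[sig] differs from skss_old.skss_per_sig[sig]
            VG_(sigaction)( sig, ...new kernel settings... );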

   Now, that said, the general scheme we have now is, that regardless
   of what the client puts into the SCSS (viz, asks for), what we
   would like to do is as follows:

   (1) run code on the virtual CPU with all signals blocked

   (2) at convenient moments for us (that is, when the VCPU stops, and
       control is back with the scheduler), ask the kernel "do you
       have any signals for me?" and if it does, collect up the info,
       and deliver them to the client (by building sigframes).

   And that's almost what we do.  The signal polling is done by
   VG_(poll_signals), which calls through to VG_(sigtimedwait_zero) to
   do the dirty work.  (of which more later).

   By polling signals, rather than catching them, we get to deal with
   them only at convenient moments, rather than having to recover from
   taking a signal while generated code is running.
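
   For reference, the primitive underneath is sigtimedwait with a zero
   timeout.  A minimal standalone POSIX sketch of the idea (the real
   code uses VG_(sigtimedwait_zero) and the vki_* types instead):

      struct timespec zero = { 0, 0 };
      siginfo_t si;
      int sig = sigtimedwait(&sigs_of_interest, &si, &zero);
      -- sig > 0:  a pending signal was dequeued; si describes it
      -- sig == -1 with errno == EAGAIN:  nothing pending; carry on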

   Now unfortunately .. the above scheme only works for so-called
   async signals.  An async signal is one which isn't associated with
   any particular instruction, eg Control-C (SIGINT).  For those, it
   doesn't matter if we don't deliver the signal to the client
   immediately; it only matters that we deliver it eventually.  Hence
   polling is OK.

   But the other group -- sync signals -- are all related by the fact
   that they are various ways for the host CPU to fail to execute an
   instruction: SIGILL, SIGSEGV, SIGFPE.  And they can't be deferred,
   because obviously if a host instruction can't execute, well then we
   have to immediately do Plan B, whatever that is.

   So the next approximation of what happens is:

   (1) run code on vcpu with all async signals blocked

   (2) at convenient moments (when NOT running the vcpu), poll for
       async signals.

   (1) and (2) together imply that if the host does deliver a signal
   to async_signalhandler while the VCPU is running, something's
   seriously wrong.

   (3) when running code on vcpu, don't block sync signals.  Instead
       register sync_signalhandler and catch any such via that.  Of
       course, that means an ugly recovery path if we do -- the
       sync_signalhandler has to longjump, exiting out of the
       generated code, and the assembly-dispatcher thingy that runs
       it, and gets caught in m_scheduler, which then tells m_signals
       to deliver the signal.

   Now naturally (ha ha) even that might be tolerable, but there's
   something worse: dealing with signals delivered to threads in
   syscalls.

   Obviously from the above, SKSS's signal mask (viz, what we really
   run with) is way different from SCSS's signal mask (viz, what the
   client thread thought it asked for).  (eg) It may well be that the
   client did not block control-C, so that it just expects to drop
   dead if it receives ^C whilst blocked in a syscall, but by default
   we are running with all async signals blocked, and so that signal
   could be arbitrarily delayed, or perhaps even lost (not sure).

   So what we have to do, when doing any syscall which SfMayBlock, is
   to quickly switch in the SCSS-specified signal mask just before the
   syscall, and switch it back just afterwards, and hope that we don't
   get caught up in some weird race condition.  This is the primary
   purpose of the ultra-magical pieces of assembly code in
   coregrind/m_syswrap/syscall-<plat>.S
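
   In outline the dance is (pseudocode; in reality the mask switch and
   the syscall must be adjacent instructions with no window in
   between, which is exactly why it is done in assembly):

      sigprocmask(SIG_SETMASK, &client_mask, &vg_mask);  -- open the gate
      res = do_syscall(...);                             -- may block here
      sigprocmask(SIG_SETMASK, &vg_mask, NULL);          -- close it again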

   The ways in which V can come to hear of signals that need to be
   forwarded to the client are as follows:

    sync signals: can arrive at any time whatsoever.  These are caught
                  by sync_signalhandler

    async signals:
       if    running generated code
       then  these are blocked, so we don't expect to catch them in
             async_signalhandler

       else
       if    thread is blocked in a syscall marked SfMayBlock
       then  signals may be delivered to async_sighandler, since we
             temporarily unblocked them for the duration of the syscall,
             by using the real (SCSS) mask for this thread

       else  we're doing misc housekeeping activities (eg, making a
             translation, washing our hair, etc).  As in the normal case,
             these signals are blocked, but we can and do poll for them
             using VG_(poll_signals).

   Now, re VG_(poll_signals), it polls the kernel by doing
   VG_(sigtimedwait_zero).  This is trivial on Linux, since it's just a
   syscall.  But on Darwin and AIX, we have to cobble together the
   functionality in a tedious, longwinded and probably error-prone way.

   Finally, if a gdb is debugging the process under valgrind, the
   signal can be ignored if gdb says so.  So, before resuming the
   scheduler/delivering the signal, a call to
   VG_(gdbserver_report_signal) is done.  If this returns True, the
   signal is delivered.
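
   Schematically:

      if (VG_(gdbserver_report_signal)(sig, tid))
         deliver the signal to the client (build a sigframe)
      else
         drop it and resume the thread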
*/

#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_vkiscnums.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcsetjmp.h"    // to keep _threadstate.h happy
#include "pub_core_threadstate.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_debugger.h"      // For VG_(start_debugger)
#include "pub_core_errormgr.h"
#include "pub_core_gdbserver.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcproc.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_scheduler.h"
#include "pub_core_signals.h"
#include "pub_core_sigframe.h"      // For VG_(sigframe_create)()
#include "pub_core_stacks.h"        // For VG_(change_stack)()
#include "pub_core_stacktrace.h"    // For VG_(get_and_pp_StackTrace)()
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"
#include "pub_core_tooliface.h"
#include "pub_core_coredump.h"

/* ---------------------------------------------------------------------
   Forwards decls.
   ------------------------------------------------------------------ */

static void sync_signalhandler  ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void async_signalhandler ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );
static void sigvgkill_handler   ( Int sigNo, vki_siginfo_t *info,
                                             struct vki_ucontext * );

/* Maximum usable signal. */
Int VG_(max_signal) = _VKI_NSIG;

#define N_QUEUED_SIGNALS  8

typedef struct SigQueue {
   Int next;
   vki_siginfo_t sigs[N_QUEUED_SIGNALS];
} SigQueue;

/* ------ Macros for pulling stuff out of ucontexts ------ */

/* Q: what does VG_UCONTEXT_SYSCALL_SYSRES do?  A: let's suppose the
   machine context (uc) reflects the situation that a syscall had just
   completed, quite literally -- that is, that the program counter was
   now at the instruction following the syscall.  (or we're slightly
   downstream, but we're sure no relevant register has yet changed
   value.)  Then VG_UCONTEXT_SYSCALL_SYSRES returns a SysRes reflecting
   the result of the syscall; it does this by fishing relevant bits of
   the machine state out of the uc.  Of course if the program counter
   was somewhere else entirely then the result is likely to be
   meaningless, so the caller of VG_UCONTEXT_SYSCALL_SYSRES has to be
   very careful to pay attention to the results only when it is sure
   that the said constraint on the program counter is indeed valid. */
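
/* A sketch of the intended use (illustrative only, not a definition
   from this file; it assumes the PC constraint above has already been
   established by the caller, eg for a thread in VgTs_WaitSys):

      SysRes sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
      if (sr_isError(sres))
         ... the syscall failed, with error sr_Err(sres) ...
      else
         ... it succeeded, with result sr_Res(sres) ...
*/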

#if defined(VGP_x86_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.eip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.esp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.eax into a SysRes. */ \
      VG_(mk_SysRes_x86_linux)( (uc)->uc_mcontext.eax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.eip);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.esp);    \
        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.ebp;   \
      }

#elif defined(VGP_amd64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.rip)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.rsp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      /* Convert the value in uc_mcontext.rax into a SysRes. */ \
      VG_(mk_SysRes_amd64_linux)( (uc)->uc_mcontext.rax )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.rip;             \
        (srP)->r_sp = (uc)->uc_mcontext.rsp;             \
        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.rbp; \
      }

#elif defined(VGP_ppc32_linux)
/* Comments from Paul Mackerras 25 Nov 05:

   > I'm tracking down a problem where V's signal handling doesn't
   > work properly on a ppc440gx running 2.4.20.  The problem is that
   > the ucontext being presented to V's sighandler seems completely
   > bogus.

   > V's kernel headers and hence ucontext layout are derived from
   > 2.6.9.  I compared include/asm-ppc/ucontext.h from 2.4.20 and
   > 2.6.13.

   > Can I just check my interpretation: the 2.4.20 one contains the
   > uc_mcontext field in line, whereas the 2.6.13 one has a pointer
   > to said struct?  And so if V is using the 2.6.13 struct then a
   > 2.4.20 one will make no sense to it.

   Not quite... what is inline in the 2.4.20 version is a
   sigcontext_struct, not an mcontext.  The sigcontext looks like
   this:

     struct sigcontext_struct {
        unsigned long   _unused[4];
        int             signal;
        unsigned long   handler;
        unsigned long   oldmask;
        struct pt_regs  *regs;
     };

   The regs pointer of that struct ends up at the same offset as the
   uc_regs of the 2.6 struct ucontext, and a struct pt_regs is the
   same as the mc_gregs field of the mcontext.  In fact the integer
   regs are followed in memory by the floating point regs on 2.4.20.

   Thus if you are using the 2.6 definitions, it should work on 2.4.20
   provided that you go via uc->uc_regs rather than looking in
   uc->uc_mcontext directly.

   There is another subtlety: 2.4.20 doesn't save the vector regs when
   delivering a signal, and 2.6.x only saves the vector regs if the
   process has ever used an altivec instruction.  If 2.6.x does save
   the vector regs, it sets the MSR_VEC bit in
   uc->uc_regs->mc_gregs[PT_MSR], otherwise it clears it.  That bit
   will always be clear under 2.4.20.  So you can use that bit to tell
   whether uc->uc_regs->mc_vregs is valid. */

#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_regs->mc_gregs[VKI_PT_R1])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the values in uc_mcontext r3,cr into a SysRes. */ \
      VG_(mk_SysRes_ppc32_linux)(                                  \
         (uc)->uc_regs->mc_gregs[VKI_PT_R3],                       \
         (((uc)->uc_regs->mc_gregs[VKI_PT_CCR] >> 28) & 1)         \
      )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                     \
      { (srP)->r_pc = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_NIP]);   \
        (srP)->r_sp = (ULong)((uc)->uc_regs->mc_gregs[VKI_PT_R1]);    \
        (srP)->misc.PPC32.r_lr = (uc)->uc_regs->mc_gregs[VKI_PT_LNK]; \
      }

#elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_NIP])
#  define VG_UCONTEXT_STACK_PTR(uc)  ((uc)->uc_mcontext.gp_regs[VKI_PT_R1])
   /* Dubious hack: if there is an error, only consider the lowest 8
      bits of r3.  memcheck/tests/post-syscall shows a case where an
      interrupted syscall should have produced a ucontext with 0x4
      (VKI_EINTR) in r3 but is in fact producing 0x204. */
   /* Awaiting clarification from PaulM.  Evidently 0x204 is
      ERESTART_RESTARTBLOCK, which shouldn't have made it into user
      space. */
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( struct vki_ucontext* uc )
   {
      ULong err = (uc->uc_mcontext.gp_regs[VKI_PT_CCR] >> 28) & 1;
      ULong r3  = uc->uc_mcontext.gp_regs[VKI_PT_R3];
      if (err) r3 &= 0xFF;
      return VG_(mk_SysRes_ppc64_linux)( r3, err );
   }
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                       \
      { (srP)->r_pc = (uc)->uc_mcontext.gp_regs[VKI_PT_NIP];            \
        (srP)->r_sp = (uc)->uc_mcontext.gp_regs[VKI_PT_R1];             \
        (srP)->misc.PPC64.r_lr = (uc)->uc_mcontext.gp_regs[VKI_PT_LNK]; \
      }

#elif defined(VGP_arm_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.arm_pc)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.arm_sp)
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                           \
      /* Convert the value in uc_mcontext.arm_r0 into a SysRes. */ \
      VG_(mk_SysRes_arm_linux)( (uc)->uc_mcontext.arm_r0 )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)        \
      { (srP)->r_pc = (uc)->uc_mcontext.arm_pc;          \
        (srP)->r_sp = (uc)->uc_mcontext.arm_sp;          \
        (srP)->misc.ARM.r14 = (uc)->uc_mcontext.arm_lr;  \
        (srP)->misc.ARM.r12 = (uc)->uc_mcontext.arm_ip;  \
        (srP)->misc.ARM.r11 = (uc)->uc_mcontext.arm_fp;  \
        (srP)->misc.ARM.r7  = (uc)->uc_mcontext.arm_r7;  \
      }

#elif defined(VGP_arm64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)       ((UWord)((uc)->uc_mcontext.pc))
#  define VG_UCONTEXT_STACK_PTR(uc)       ((UWord)((uc)->uc_mcontext.sp))
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                            \
      /* Convert the value in uc_mcontext.regs[0] into a SysRes. */ \
      VG_(mk_SysRes_arm64_linux)( (uc)->uc_mcontext.regs[0] )
#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)           \
      { (srP)->r_pc = (uc)->uc_mcontext.pc;                 \
        (srP)->r_sp = (uc)->uc_mcontext.sp;                 \
        (srP)->misc.ARM64.x29 = (uc)->uc_mcontext.regs[29]; \
        (srP)->misc.ARM64.x30 = (uc)->uc_mcontext.regs[30]; \
      }

#elif defined(VGP_x86_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__eip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      return ss->__esp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* this is complicated by the problem that there are 3 different
         kinds of syscalls, each with its own return convention.
         NB: scclass is a host word, hence UWord is good for both
         amd64-darwin and x86-darwin */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      UInt carry = 1 & ss->__eflags;
      UInt err = 0;
      UInt wLO = 0;
      UInt wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__eax;
            wHI = ss->__edx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__eax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__eax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_x86_darwin)( scclass, err ? True : False,
                                        wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)(ucV);
      struct __darwin_mcontext32* mc = uc->uc_mcontext;
      struct __darwin_i386_thread_state* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__eip);
      srP->r_sp = (ULong)(ss->__esp);
      srP->misc.X86.r_ebp = (UInt)(ss->__ebp);
   }

#elif defined(VGP_amd64_darwin)

   static inline Addr VG_UCONTEXT_INSTR_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rip;
   }
   static inline Addr VG_UCONTEXT_STACK_PTR( void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      return ss->__rsp;
   }
   static inline SysRes VG_UCONTEXT_SYSCALL_SYSRES( void* ucV,
                                                    UWord scclass ) {
      /* This is copied from the x86-darwin case.  I'm not sure if it
         is correct. */
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      /* duplicates logic in m_syswrap.getSyscallStatusFromGuestState */
      ULong carry = 1 & ss->__rflags;
      ULong err = 0;
      ULong wLO = 0;
      ULong wHI = 0;
      switch (scclass) {
         case VG_DARWIN_SYSCALL_CLASS_UNIX:
            err = carry;
            wLO = ss->__rax;
            wHI = ss->__rdx;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MACH:
            wLO = ss->__rax;
            break;
         case VG_DARWIN_SYSCALL_CLASS_MDEP:
            wLO = ss->__rax;
            break;
         default:
            vg_assert(0);
            break;
      }
      return VG_(mk_SysRes_amd64_darwin)( scclass, err ? True : False,
                                          wHI, wLO );
   }
   static inline
   void VG_UCONTEXT_TO_UnwindStartRegs( UnwindStartRegs* srP,
                                        void* ucV ) {
      ucontext_t* uc = (ucontext_t*)ucV;
      struct __darwin_mcontext64* mc = uc->uc_mcontext;
      struct __darwin_x86_thread_state64* ss = &mc->__ss;
      srP->r_pc = (ULong)(ss->__rip);
      srP->r_sp = (ULong)(ss->__rsp);
      srP->misc.AMD64.r_rbp = (ULong)(ss->__rbp);
   }

#elif defined(VGP_s390x_linux)

#  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.regs.psw.addr)
#  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[15])
#  define VG_UCONTEXT_FRAME_PTR(uc)       ((uc)->uc_mcontext.regs.gprs[11])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                        \
      VG_(mk_SysRes_s390x_linux)((uc)->uc_mcontext.regs.gprs[2])
#  define VG_UCONTEXT_LINK_REG(uc) ((uc)->uc_mcontext.regs.gprs[14])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                  \
      { (srP)->r_pc = (ULong)((uc)->uc_mcontext.regs.psw.addr);    \
        (srP)->r_sp = (ULong)((uc)->uc_mcontext.regs.gprs[15]);    \
        (srP)->misc.S390X.r_fp = (uc)->uc_mcontext.regs.gprs[11];  \
        (srP)->misc.S390X.r_lr = (uc)->uc_mcontext.regs.gprs[14];  \
      }

#elif defined(VGP_mips32_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)   ((UWord)(((uc)->uc_mcontext.sc_pc)))
#  define VG_UCONTEXT_STACK_PTR(uc)   ((UWord)((uc)->uc_mcontext.sc_regs[29]))
#  define VG_UCONTEXT_FRAME_PTR(uc)   ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                                     \
      /* Convert the values in uc_mcontext sc_regs[2,3,7] into a SysRes. */  \
      VG_(mk_SysRes_mips32_linux)( (uc)->uc_mcontext.sc_regs[2],             \
                                   (uc)->uc_mcontext.sc_regs[3],             \
                                   (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS32.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS32.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS32.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }

#elif defined(VGP_mips64_linux)
#  define VG_UCONTEXT_INSTR_PTR(uc)   (((uc)->uc_mcontext.sc_pc))
#  define VG_UCONTEXT_STACK_PTR(uc)   ((uc)->uc_mcontext.sc_regs[29])
#  define VG_UCONTEXT_FRAME_PTR(uc)   ((uc)->uc_mcontext.sc_regs[30])
#  define VG_UCONTEXT_SYSCALL_NUM(uc) ((uc)->uc_mcontext.sc_regs[2])
#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                                     \
      /* Convert the values in uc_mcontext sc_regs[2,3,7] into a SysRes. */  \
      VG_(mk_SysRes_mips64_linux)((uc)->uc_mcontext.sc_regs[2],              \
                                  (uc)->uc_mcontext.sc_regs[3],              \
                                  (uc)->uc_mcontext.sc_regs[7])

#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)               \
      { (srP)->r_pc = (uc)->uc_mcontext.sc_pc;                  \
        (srP)->r_sp = (uc)->uc_mcontext.sc_regs[29];            \
        (srP)->misc.MIPS64.r30 = (uc)->uc_mcontext.sc_regs[30]; \
        (srP)->misc.MIPS64.r31 = (uc)->uc_mcontext.sc_regs[31]; \
        (srP)->misc.MIPS64.r28 = (uc)->uc_mcontext.sc_regs[28]; \
      }

#else
#  error Unknown platform
#endif

/* ------ Macros for pulling stuff out of siginfos ------ */

/* These macros allow use of uniform names when working with
   both the Linux and Darwin vki definitions. */
#if defined(VGO_linux)
#  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
#  define VKI_SIGINFO_si_pid   _sifields._kill._pid
#elif defined(VGO_darwin)
#  define VKI_SIGINFO_si_addr  si_addr
#  define VKI_SIGINFO_si_pid   si_pid
#else
#  error Unknown OS
#endif

/* ---------------------------------------------------------------------
   HIGH LEVEL STUFF TO DO WITH SIGNALS: POLICY (MOSTLY)
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Signal state for this process.
   ------------------------------------------------------------------ */


/* Base-ment of these arrays[_VKI_NSIG].

   Valid signal numbers are 1 .. _VKI_NSIG inclusive.
   Rather than subtracting 1 for indexing these arrays, which
   is tedious and error-prone, they are simply dimensioned 1 larger,
   and entry [0] is not used.
 */
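
/* For example (illustrative): the record for SIGINT lives at
   scss.scss_per_sig[VKI_SIGINT], with no off-by-one correction;
   slot [0] is simply never used. */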

/* -----------------------------------------------------
   Static client signal state (SCSS).  This is the state
   that the client thinks it has the kernel in.
   SCSS records verbatim the client's settings.  These
   are mashed around only when SKSS is calculated from it.
   -------------------------------------------------- */

typedef
   struct {
      void* scss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN or ptr to
                              client's handler */
      UInt  scss_flags;
      vki_sigset_t scss_mask;
      void* scss_restorer; /* where sigreturn goes */
      void* scss_sa_tramp; /* sa_tramp setting, Darwin only */
      /* re _restorer and _sa_tramp, we merely record the values
         supplied when the client does 'sigaction' and give them back
         when requested.  Otherwise they are simply ignored. */
   }
   SCSS_Per_Signal;

typedef
   struct {
      /* per-signal info */
      SCSS_Per_Signal scss_per_sig[1+_VKI_NSIG];

      /* Additional elements to SCSS not stored here:
         - for each thread, the thread's blocking mask
         - for each thread in WaitSIG, the set of waited-on sigs
      */
   }
   SCSS;

static SCSS scss;

/* -----------------------------------------------------
   Static kernel signal state (SKSS).  This is the state
   that we have the kernel in.  It is computed from SCSS.
   -------------------------------------------------- */

/* Let's do:
     sigprocmask assigns to all thread masks
     so that at least everything is always consistent
   Flags:
     SA_SIGINFO -- we always set it, and honour it for the client
     SA_NOCLDSTOP -- passed to kernel
     SA_ONESHOT or SA_RESETHAND -- pass through
     SA_RESTART -- we observe this but set our handlers to always restart
     SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block
                                everything
     SA_ONSTACK -- pass through
     SA_NOCLDWAIT -- pass through
*/

typedef
   struct {
      void* skss_handler;  /* VKI_SIG_DFL or VKI_SIG_IGN
                              or ptr to our handler */
      UInt skss_flags;
      /* There is no skss_mask, since we know that we will always ask
         for all signals to be blocked in our sighandlers. */
      /* Also there is no skss_restorer. */
   }
   SKSS_Per_Signal;

typedef
   struct {
      SKSS_Per_Signal skss_per_sig[1+_VKI_NSIG];
   }
   SKSS;

static SKSS skss;

/* returns True if signal is to be ignored.
   To check this, possibly call gdbserver with tid. */
static Bool is_sig_ign(Int sigNo, ThreadId tid)
{
   vg_assert(sigNo >= 1 && sigNo <= _VKI_NSIG);

   return scss.scss_per_sig[sigNo].scss_handler == VKI_SIG_IGN
      || !VG_(gdbserver_report_signal) (sigNo, tid);
}

/* ---------------------------------------------------------------------
   Compute the SKSS required by the current SCSS.
   ------------------------------------------------------------------ */

static
void pp_SKSS ( void )
{
   Int sig;
   VG_(printf)("\n\nSKSS:\n");
   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      VG_(printf)("sig %d:  handler %p,  flags 0x%x\n", sig,
                  skss.skss_per_sig[sig].skss_handler,
                  skss.skss_per_sig[sig].skss_flags );
   }
}

/* This is the core, clever bit.  Computation is as follows:

   For each signal
      handler = if client has a handler, then our handler
                else if client is DFL, then our handler as well
                else (client must be IGN)
                  then handler is IGN */
static
void calculate_SKSS_from_SCSS ( SKSS* dst )
{
   Int   sig;
   UInt  scss_flags;
   UInt  skss_flags;

   for (sig = 1; sig <= _VKI_NSIG; sig++) {
      void *skss_handler;
      void *scss_handler;

      scss_handler = scss.scss_per_sig[sig].scss_handler;
      scss_flags   = scss.scss_per_sig[sig].scss_flags;

      switch(sig) {
      case VKI_SIGSEGV:
      case VKI_SIGBUS:
      case VKI_SIGFPE:
      case VKI_SIGILL:
      case VKI_SIGTRAP:
         /* For these, we always want to catch them and report, even
            if the client code doesn't. */
         skss_handler = sync_signalhandler;
         break;

      case VKI_SIGCONT:
         /* Let the kernel handle SIGCONT unless the client is actually
            catching it. */
      case VKI_SIGCHLD:
      case VKI_SIGWINCH:
      case VKI_SIGURG:
         /* For signals which have a default action of Ignore,
            only set a handler if the client has set a signal handler.
            Otherwise the kernel will interrupt a syscall which
            wouldn't have otherwise been interrupted. */
         if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_DFL)
            skss_handler = VKI_SIG_DFL;
         else if (scss.scss_per_sig[sig].scss_handler == VKI_SIG_IGN)
            skss_handler = VKI_SIG_IGN;
         else
            skss_handler = async_signalhandler;
         break;

      default:
         // VKI_SIGVG* are runtime variables, so we can't make them
         // cases in the switch, so we handle them in the 'default' case.
         if (sig == VG_SIGVGKILL)
            skss_handler = sigvgkill_handler;
         else {
            if (scss_handler == VKI_SIG_IGN)
               skss_handler = VKI_SIG_IGN;
            else
               skss_handler = async_signalhandler;
         }
         break;
      }

      /* Flags */

      skss_flags = 0;

      /* SA_NOCLDSTOP, SA_NOCLDWAIT: pass to kernel */
      skss_flags |= scss_flags & (VKI_SA_NOCLDSTOP | VKI_SA_NOCLDWAIT);

      /* SA_ONESHOT: ignore client setting */

      /* SA_RESTART: ignore client setting and always set it for us.
         Though we never rely on the kernel to restart a
         syscall, we observe whether it wanted to restart the syscall
         or not, which is needed by
         VG_(fixup_guest_state_after_syscall_interrupted) */
      skss_flags |= VKI_SA_RESTART;

      /* SA_NOMASK: ignore it */

      /* SA_ONSTACK: client setting is irrelevant here */
      /* We don't set a signal stack, so ignore */

      /* always ask for SA_SIGINFO */
      skss_flags |= VKI_SA_SIGINFO;

      /* use our own restorer */
      skss_flags |= VKI_SA_RESTORER;

      /* Create SKSS entry for this signal. */
      if (sig != VKI_SIGKILL && sig != VKI_SIGSTOP)
         dst->skss_per_sig[sig].skss_handler = skss_handler;
      else
         dst->skss_per_sig[sig].skss_handler = VKI_SIG_DFL;

      dst->skss_per_sig[sig].skss_flags   = skss_flags;
   }

   /* Sanity checks. */
   vg_assert(dst->skss_per_sig[VKI_SIGKILL].skss_handler == VKI_SIG_DFL);
   vg_assert(dst->skss_per_sig[VKI_SIGSTOP].skss_handler == VKI_SIG_DFL);

   if (0)
      pp_SKSS();
}

/* ---------------------------------------------------------------------
   After a possible SCSS change, update SKSS and the kernel itself.
   ------------------------------------------------------------------ */

// We need two levels of macro-expansion here to convert __NR_rt_sigreturn
// to a number before converting it to a string... sigh.
extern void my_sigreturn(void);

#if defined(VGP_x86_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movl $" #name ", %eax\n" \
   "   int  $0x80\n" \
   ".previous\n"

#elif defined(VGP_amd64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   movq $" #name ", %rax\n" \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_ppc32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n" \
   ".previous\n"

#elif defined(VGP_ppc64be_linux)
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".section \".opd\",\"aw\"\n" \
   ".align   3\n" \
   "my_sigreturn:\n" \
   ".quad    .my_sigreturn,.TOC.@tocbase,0\n" \
   ".previous\n" \
   ".type    .my_sigreturn,@function\n" \
   ".globl   .my_sigreturn\n" \
   ".my_sigreturn:\n" \
   "   li 0, " #name "\n" \
   "   sc\n"

#elif defined(VGP_ppc64le_linux)
/* Little Endian supports ELF version 2.  In the future, it may
 * support other versions.
 */
#  define _MY_SIGRETURN(name) \
   ".align   2\n" \
   ".globl   my_sigreturn\n" \
   ".type    .my_sigreturn,@function\n" \
   "my_sigreturn:\n" \
   "#if _CALL_ELF == 2 \n" \
   "0: addis        2,12,.TOC.-0b@ha\n" \
   "   addi         2,2,.TOC.-0b@l\n" \
   "   .localentry my_sigreturn,.-my_sigreturn\n" \
   "#endif \n" \
   "   sc\n" \
   "   .size my_sigreturn,.-my_sigreturn\n"

#elif defined(VGP_arm_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "    mov  r7, #" #name "\n\t" \
   "    svc  0x00000000\n" \
   ".previous\n"

#elif defined(VGP_arm64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n\t" \
   "    mov  x8, #" #name "\n\t" \
   "    svc  0x0\n" \
   ".previous\n"

#elif defined(VGP_x86_darwin)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "movl $" VG_STRINGIFY(__NR_DARWIN_FAKE_SIGRETURN) ",%eax\n" \
   "int $0x80"

#elif defined(VGP_amd64_darwin)
   // DDD: todo
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   "ud2\n"

#elif defined(VGP_s390x_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   ".globl my_sigreturn\n" \
   "my_sigreturn:\n" \
   " svc " #name "\n" \
   ".previous\n"

#elif defined(VGP_mips32_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" /* apparently $2 is v0 */ \
   "   syscall\n" \
   ".previous\n"

#elif defined(VGP_mips64_linux)
#  define _MY_SIGRETURN(name) \
   ".text\n" \
   "my_sigreturn:\n" \
   "   li $2, " #name "\n" \
   "   syscall\n" \
   ".previous\n"

#else
#  error Unknown platform
#endif

#define MY_SIGRETURN(name)  _MY_SIGRETURN(name)
asm(
   MY_SIGRETURN(__NR_rt_sigreturn)
);

static void handle_SCSS_change ( Bool force_update )
{
   Int  res, sig;
   SKSS skss_old;
   vki_sigaction_toK_t   ksa;
   vki_sigaction_fromK_t ksa_old;

   /* Remember old SKSS and calculate new one. */
   skss_old = skss;
   calculate_SKSS_from_SCSS ( &skss );

   /* Compare the new SKSS entries vs the old ones, and update kernel
      where they differ. */
   for (sig = 1; sig <= VG_(max_signal); sig++) {

      /* Trying to do anything with SIGKILL is pointless; just ignore
         it. */
      if (sig == VKI_SIGKILL || sig == VKI_SIGSTOP)
         continue;

      if (!force_update) {
         if ((skss_old.skss_per_sig[sig].skss_handler
              == skss.skss_per_sig[sig].skss_handler)
             && (skss_old.skss_per_sig[sig].skss_flags
                 == skss.skss_per_sig[sig].skss_flags))
            /* no difference */
            continue;
      }

      ksa.ksa_handler = skss.skss_per_sig[sig].skss_handler;
      ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
#     if !defined(VGP_ppc32_linux) && \
         !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
         !defined(VGP_mips32_linux)
      ksa.sa_restorer = my_sigreturn;
#     endif
      /* Re above ifdef (also the assertion below), PaulM says:
         The sa_restorer field is not used at all on ppc.  Glibc
         converts the sigaction you give it into a kernel sigaction,
         but it doesn't put anything in the sa_restorer field.
      */

      /* block all signals in handler */
      VG_(sigfillset)( &ksa.sa_mask );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGKILL );
      VG_(sigdelset)( &ksa.sa_mask, VKI_SIGSTOP );

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(dmsg)("setting ksig %d to: hdlr %p, flags 0x%lx, "
                   "mask(msb..lsb) 0x%llx 0x%llx\n",
                   sig, ksa.ksa_handler,
                   (UWord)ksa.sa_flags,
                   _VKI_NSIG_WORDS > 1 ? (ULong)ksa.sa_mask.sig[1] : 0,
                   (ULong)ksa.sa_mask.sig[0]);

      res = VG_(sigaction)( sig, &ksa, &ksa_old );
      vg_assert(res == 0);

      /* Since we got the old sigaction more or less for free, might
         as well extract the maximum sanity-check value from it. */
      if (!force_update) {
         vg_assert(ksa_old.ksa_handler
                   == skss_old.skss_per_sig[sig].skss_handler);
         vg_assert(ksa_old.sa_flags
                   == skss_old.skss_per_sig[sig].skss_flags);
#        if !defined(VGP_ppc32_linux) && \
            !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
            !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux)
         vg_assert(ksa_old.sa_restorer == my_sigreturn);
#        endif
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
         VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGSTOP );
         vg_assert(VG_(isfullsigset)( &ksa_old.sa_mask ));
      }
   }
}

/* ---------------------------------------------------------------------
   Update/query SCSS in accordance with client requests.
   ------------------------------------------------------------------ */

/* Logic for this alt-stack stuff copied directly from do_sigaltstack
   in kernel/signal.[ch] */

/* True if we are on the alternate signal stack.  */
static Bool on_sig_stack ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (m_SP - (Addr)tst->altstack.ss_sp < (Addr)tst->altstack.ss_size);
}

static Int sas_ss_flags ( ThreadId tid, Addr m_SP )
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   return (tst->altstack.ss_size == 0
              ? VKI_SS_DISABLE
              : on_sig_stack(tid, m_SP) ? VKI_SS_ONSTACK : 0);
}

SysRes VG_(do_sys_sigaltstack) ( ThreadId tid, vki_stack_t* ss, vki_stack_t* oss )
{
   Addr m_SP;

   vg_assert(VG_(is_valid_tid)(tid));
   m_SP  = VG_(get_SP)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaltstack: tid %d, "
                "ss %p{%p,sz=%llu,flags=0x%llx}, oss %p (current SP %p)\n",
                tid, (void*)ss,
                ss ? ss->ss_sp : 0,
                (ULong)(ss ? ss->ss_size : 0),
                (ULong)(ss ? ss->ss_flags : 0),
                (void*)oss, (void*)m_SP);

   if (oss != NULL) {
      oss->ss_sp    = VG_(threads)[tid].altstack.ss_sp;
      oss->ss_size  = VG_(threads)[tid].altstack.ss_size;
      oss->ss_flags = VG_(threads)[tid].altstack.ss_flags
                      | sas_ss_flags(tid, m_SP);
   }

   if (ss != NULL) {
      if (on_sig_stack(tid, VG_(get_SP)(tid))) {
         return VG_(mk_SysRes_Error)( VKI_EPERM );
      }
      if (ss->ss_flags != VKI_SS_DISABLE
          && ss->ss_flags != VKI_SS_ONSTACK
          && ss->ss_flags != 0) {
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
      }
      if (ss->ss_flags == VKI_SS_DISABLE) {
         VG_(threads)[tid].altstack.ss_flags = VKI_SS_DISABLE;
      } else {
         if (ss->ss_size < VKI_MINSIGSTKSZ) {
            return VG_(mk_SysRes_Error)( VKI_ENOMEM );
         }

         VG_(threads)[tid].altstack.ss_sp    = ss->ss_sp;
         VG_(threads)[tid].altstack.ss_size  = ss->ss_size;
         VG_(threads)[tid].altstack.ss_flags = 0;
      }
   }
   return VG_(mk_SysRes_Success)( 0 );
}

SysRes VG_(do_sys_sigaction) ( Int signo,
                               const vki_sigaction_toK_t* new_act,
                               vki_sigaction_fromK_t* old_act )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("sys_sigaction: sigNo %d, "
                "new %#lx, old %#lx, new flags 0x%llx\n",
                signo, (UWord)new_act, (UWord)old_act,
                (ULong)(new_act ? new_act->sa_flags : 0));

   /* Rule out various error conditions.  The aim is to ensure that if
      when the call is passed to the kernel it will definitely
      succeed. */

   /* Reject out-of-range signal numbers. */
   if (signo < 1 || signo > VG_(max_signal)) goto bad_signo;

   /* don't let them use our signals */
   if ( (signo > VG_SIGVGRTUSERMAX)
        && new_act
        && !(new_act->ksa_handler == VKI_SIG_DFL
             || new_act->ksa_handler == VKI_SIG_IGN) )
      goto bad_signo_reserved;

   /* Reject attempts to set a handler (or set ignore) for SIGKILL. */
   if ( (signo == VKI_SIGKILL || signo == VKI_SIGSTOP)
        && new_act
        && new_act->ksa_handler != VKI_SIG_DFL)
      goto bad_sigkill_or_sigstop;

   /* If the client supplied non-NULL old_act, copy the relevant SCSS
      entry into it. */
   if (old_act) {
      old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
      old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
      old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
#     endif
   }

   /* And now copy new SCSS entry from new_act. */
   if (new_act) {
      scss.scss_per_sig[signo].scss_handler  = new_act->ksa_handler;
      scss.scss_per_sig[signo].scss_flags    = new_act->sa_flags;
      scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;

      scss.scss_per_sig[signo].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
#     endif

      scss.scss_per_sig[signo].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[signo].scss_sa_tramp = new_act->sa_tramp;
#     endif

      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGKILL);
      VG_(sigdelset)(&scss.scss_per_sig[signo].scss_mask, VKI_SIGSTOP);
   }

   /* All happy bunnies ... */
   if (new_act) {
      handle_SCSS_change( False /* lazy update */ );
   }
   return VG_(mk_SysRes_Success)( 0 );

  bad_signo:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: bad signal number %d in sigaction()\n", signo);
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_signo_reserved:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is used internally by Valgrind\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );

  bad_sigkill_or_sigstop:
   if (VG_(showing_core_errors)() && !VG_(clo_xml)) {
      VG_(umsg)("Warning: ignored attempt to set %s handler in sigaction();\n",
                VG_(signame)(signo));
      VG_(umsg)("         the %s signal is uncatchable\n",
                VG_(signame)(signo));
   }
   return VG_(mk_SysRes_Error)( VKI_EINVAL );
}

static
void do_sigprocmask_bitops ( Int vki_how,
                             vki_sigset_t* orig_set,
                             vki_sigset_t* modifier )
{
   switch (vki_how) {
      case VKI_SIG_BLOCK:
         VG_(sigaddset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_UNBLOCK:
         VG_(sigdelset_from_set)( orig_set, modifier );
         break;
      case VKI_SIG_SETMASK:
         *orig_set = *modifier;
         break;
      default:
         VG_(core_panic)("do_sigprocmask_bitops");
         break;
   }
}

static
HChar* format_sigset ( const vki_sigset_t* set )
{
   static HChar buf[128];
   Int w;

   VG_(strcpy)(buf, "");

   for (w = _VKI_NSIG_WORDS - 1; w >= 0; w--)
   {
#     if _VKI_NSIG_BPW == 32
      VG_(sprintf)(buf + VG_(strlen)(buf), "%08llx",
                   set ? (ULong)set->sig[w] : 0);
#     elif _VKI_NSIG_BPW == 64
      VG_(sprintf)(buf + VG_(strlen)(buf), "%16llx",
                   set ? (ULong)set->sig[w] : 0);
#     else
#       error "Unsupported value for _VKI_NSIG_BPW"
#     endif
   }

   return buf;
}

/*
   This updates the thread's signal mask.  There's no such thing as a
   process-wide signal mask.

   Note that the thread signal masks are an implicit part of SCSS,
   which is why this routine is allowed to mess with them.
*/
static
void do_setmask ( ThreadId tid,
                  Int how,
                  vki_sigset_t* newset,
                  vki_sigset_t* oldset )
{
   if (VG_(clo_trace_signals))
      VG_(dmsg)("do_setmask: tid = %d how = %d (%s), newset = %p (%s)\n",
                tid, how,
                how==VKI_SIG_BLOCK ? "SIG_BLOCK" : (
                   how==VKI_SIG_UNBLOCK ? "SIG_UNBLOCK" : (
                      how==VKI_SIG_SETMASK ? "SIG_SETMASK" : "???")),
                newset, newset ? format_sigset(newset) : "NULL" );

   /* Just do this thread. */
   vg_assert(VG_(is_valid_tid)(tid));
   if (oldset) {
      *oldset = VG_(threads)[tid].sig_mask;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("\toldset=%p %s\n", oldset, format_sigset(oldset));
   }
   if (newset) {
      do_sigprocmask_bitops (how, &VG_(threads)[tid].sig_mask, newset );
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGKILL);
      VG_(sigdelset)(&VG_(threads)[tid].sig_mask, VKI_SIGSTOP);
      VG_(threads)[tid].tmp_sig_mask = VG_(threads)[tid].sig_mask;
   }
}

SysRes VG_(do_sys_sigprocmask) ( ThreadId tid,
                                 Int how,
                                 vki_sigset_t* set,
                                 vki_sigset_t* oldset )
{
   switch(how) {
      case VKI_SIG_BLOCK:
      case VKI_SIG_UNBLOCK:
      case VKI_SIG_SETMASK:
         vg_assert(VG_(is_valid_tid)(tid));
         do_setmask ( tid, how, set, oldset );
         return VG_(mk_SysRes_Success)( 0 );

      default:
         VG_(dmsg)("sigprocmask: unknown 'how' field %d\n", how);
         return VG_(mk_SysRes_Error)( VKI_EINVAL );
   }
}

/* ---------------------------------------------------------------------
   LOW LEVEL STUFF TO DO WITH SIGNALS: IMPLEMENTATION
   ------------------------------------------------------------------ */

/* ---------------------------------------------------------------------
   Handy utilities to block/restore all host signals.
   ------------------------------------------------------------------ */

/* Block all host signals, dumping the old mask in *saved_mask. */
static void block_all_host_signals ( /* OUT */ vki_sigset_t* saved_mask )
{
   Int ret;
   vki_sigset_t block_procmask;
   VG_(sigfillset)(&block_procmask);
   ret = VG_(sigprocmask)
            (VKI_SIG_SETMASK, &block_procmask, saved_mask);
   vg_assert(ret == 0);
}

/* Restore the blocking mask using the supplied saved one. */
static void restore_all_host_signals ( /* IN */ vki_sigset_t* saved_mask )
{
   Int ret;
   ret = VG_(sigprocmask)(VKI_SIG_SETMASK, saved_mask, NULL);
   vg_assert(ret == 0);
}

void VG_(clear_out_queued_signals)( ThreadId tid, vki_sigset_t* saved_mask )
{
   block_all_host_signals(saved_mask);
   if (VG_(threads)[tid].sig_queue != NULL) {
      VG_(free)(VG_(threads)[tid].sig_queue);
      VG_(threads)[tid].sig_queue = NULL;
   }
   restore_all_host_signals(saved_mask);
}

/* ---------------------------------------------------------------------
   The signal simulation proper.  A simplified version of what the
   Linux kernel does.
   ------------------------------------------------------------------ */

/* Set up a stack frame (VgSigContext) for the client's signal
   handler. */
static
void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                                       const struct vki_ucontext *uc )
{
   Addr         esp_top_of_frame;
   ThreadState* tst;
   Int          sigNo = siginfo->si_signo;

   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));
   vg_assert(VG_(is_valid_tid)(tid));
   tst = & VG_(threads)[tid];

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("push_signal_frame (thread %d): signal %d\n", tid, sigNo);
      VG_(get_and_pp_StackTrace)(tid, 10);
   }

   if (/* this signal asked to run on an alt stack */
       (scss.scss_per_sig[sigNo].scss_flags & VKI_SA_ONSTACK )
       && /* there is a defined and enabled alt stack, which we're not
             already using.  Logic from get_sigframe in
             arch/i386/kernel/signal.c. */
          sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
      ) {
      esp_top_of_frame
         = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
      if (VG_(clo_trace_signals))
         VG_(dmsg)("delivering signal %d (%s) to thread %d: "
                   "on ALT STACK (%p-%p; %ld bytes)\n",
                   sigNo, VG_(signame)(sigNo), tid, tst->altstack.ss_sp,
                   (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                   (Word)tst->altstack.ss_size );

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );

   } else {
      esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;

      /* Signal delivery to tools */
      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
   }

   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
   vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);

   /* This may fail if the client stack is busted; if that happens,
      the whole process will exit rather than simply calling the
      signal handler. */
   VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
                         scss.scss_per_sig[sigNo].scss_handler,
                         scss.scss_per_sig[sigNo].scss_flags,
                         &tst->sig_mask,
                         scss.scss_per_sig[sigNo].scss_restorer);
}

const HChar *VG_(signame)(Int sigNo)
{
   static HChar buf[20];

   switch(sigNo) {
      case VKI_SIGHUP:    return "SIGHUP";
      case VKI_SIGINT:    return "SIGINT";
      case VKI_SIGQUIT:   return "SIGQUIT";
      case VKI_SIGILL:    return "SIGILL";
      case VKI_SIGTRAP:   return "SIGTRAP";
      case VKI_SIGABRT:   return "SIGABRT";
      case VKI_SIGBUS:    return "SIGBUS";
      case VKI_SIGFPE:    return "SIGFPE";
      case VKI_SIGKILL:   return "SIGKILL";
      case VKI_SIGUSR1:   return "SIGUSR1";
      case VKI_SIGUSR2:   return "SIGUSR2";
      case VKI_SIGSEGV:   return "SIGSEGV";
      case VKI_SIGPIPE:   return "SIGPIPE";
      case VKI_SIGALRM:   return "SIGALRM";
      case VKI_SIGTERM:   return "SIGTERM";
#     if defined(VKI_SIGSTKFLT)
      case VKI_SIGSTKFLT: return "SIGSTKFLT";
#     endif
      case VKI_SIGCHLD:   return "SIGCHLD";
      case VKI_SIGCONT:   return "SIGCONT";
      case VKI_SIGSTOP:   return "SIGSTOP";
      case VKI_SIGTSTP:   return "SIGTSTP";
      case VKI_SIGTTIN:   return "SIGTTIN";
      case VKI_SIGTTOU:   return "SIGTTOU";
      case VKI_SIGURG:    return "SIGURG";
      case VKI_SIGXCPU:   return "SIGXCPU";
      case VKI_SIGXFSZ:   return "SIGXFSZ";
      case VKI_SIGVTALRM: return "SIGVTALRM";
      case VKI_SIGPROF:   return "SIGPROF";
      case VKI_SIGWINCH:  return "SIGWINCH";
      case VKI_SIGIO:     return "SIGIO";
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:    return "SIGPWR";
#     endif
#     if defined(VKI_SIGUNUSED)
      case VKI_SIGUNUSED: return "SIGUNUSED";
#     endif

#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX:
         VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
         return buf;
#     endif

      default:
         VG_(sprintf)(buf, "SIG%d", sigNo);
         return buf;
   }
}

/* Hit ourselves with a signal using the default handler */
void VG_(kill_self)(Int sigNo)
{
   Int r;
   vki_sigset_t          mask, origmask;
   vki_sigaction_toK_t   sa, origsa2;
   vki_sigaction_fromK_t origsa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(sigaction)(sigNo, &sa, &origsa);

   VG_(sigemptyset)(&mask);
   VG_(sigaddset)(&mask, sigNo);
   VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);

   r = VG_(kill)(VG_(getpid)(), sigNo);
#  if defined(VGO_linux)
   /* This sometimes fails with EPERM on Darwin.  I don't know why. */
   vg_assert(r == 0);
#  endif

   VG_(convert_sigaction_fromK_to_toK)( &origsa, &origsa2 );
   VG_(sigaction)(sigNo, &origsa2, NULL);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &origmask, NULL);
}

// The si_code describes where the signal came from.  Some come from the
// kernel, eg.: seg faults, illegal opcodes.  Some come from the user, eg.:
// from kill() (SI_USER), or timer_settime() (SI_TIMER), or an async I/O
// request (SI_ASYNCIO).  There's lots of implementation-defined leeway in
// POSIX, but the user vs. kernel distinction is what we want here.  We also
// pass in some other details that can help when si_code is unreliable.
static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
{
#  if defined(VGO_linux)
   // On Linux, SI_USER is zero, negative values are from the user, positive
   // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
   // macros but we don't use them here because other platforms don't have
   // them.
   return ( si_code > VKI_SI_USER ? True : False );

#  elif defined(VGO_darwin)
   // On Darwin 9.6.0, the si_code is completely unreliable.  It should be the
   // case that 0 means "user", and >0 means "kernel".  But:
   // - For SIGSEGV, it seems quite reliable.
   // - For SIGBUS, it's always 2.
   // - For SIGFPE, it's often 0, even for kernel ones (eg.
   //   div-by-integer-zero always gives zero).
   // - For SIGILL, it's unclear.
   // - For SIGTRAP, it's always 1.
   // You can see the "NOTIMP" (not implemented) status of a number of the
   // sub-cases in sys/signal.h.  Hopefully future versions of Darwin will
   // get this right.

   // If we're blocked waiting on a syscall, it must be a user signal, because
   // the kernel won't generate sync signals within syscalls.
   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      return False;

   // If it's a SIGSEGV, use the proper condition, since it's fairly reliable.
   } else if (SIGSEGV == signum) {
      return ( si_code > 0 ? True : False );

   // If it's anything else, assume it's kernel-generated.  Reason being that
   // kernel-generated sync signals are more common, and it's probable that
   // misdiagnosing a user signal as a kernel signal is better than the
   // opposite.
   } else {
      return True;
   }
#  else
#    error Unknown OS
#  endif
}

// This is an arbitrary si_code that we only use internally.  It corresponds
// to the value SI_KERNEL on Linux, but that's not really of any significance
// as far as I can determine.
#define VKI_SEGV_MADE_UP_GPF    0x80

/*
   Perform the default action of a signal.  If the signal is fatal, it
   marks all threads as needing to exit, but it doesn't actually kill
   the process or thread.

   If we're not being quiet, then print out some more detail about
   fatal signals (esp. core dumping signals).
 */
static void default_action(const vki_siginfo_t *info, ThreadId tid)
{
   Int  sigNo     = info->si_signo;
   Bool terminate = False;      /* kills process         */
   Bool core      = False;      /* kills process w/ core */
   struct vki_rlimit corelim;
   Bool could_core;

   vg_assert(VG_(is_running_thread)(tid));

   switch(sigNo) {
      case VKI_SIGQUIT:  /* core */
      case VKI_SIGILL:   /* core */
      case VKI_SIGABRT:  /* core */
      case VKI_SIGFPE:   /* core */
      case VKI_SIGSEGV:  /* core */
      case VKI_SIGBUS:   /* core */
      case VKI_SIGTRAP:  /* core */
      case VKI_SIGXCPU:  /* core */
      case VKI_SIGXFSZ:  /* core */
         terminate = True;
         core = True;
         break;

      case VKI_SIGHUP:   /* term */
      case VKI_SIGINT:   /* term */
      case VKI_SIGKILL:  /* term - we won't see this */
      case VKI_SIGPIPE:  /* term */
      case VKI_SIGALRM:  /* term */
      case VKI_SIGTERM:  /* term */
      case VKI_SIGUSR1:  /* term */
      case VKI_SIGUSR2:  /* term */
      case VKI_SIGIO:    /* term */
#     if defined(VKI_SIGPWR)
      case VKI_SIGPWR:   /* term */
#     endif
      case VKI_SIGSYS:   /* term */
      case VKI_SIGPROF:  /* term */
      case VKI_SIGVTALRM: /* term */
#     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
      case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
#     endif
         terminate = True;
         break;
   }

   vg_assert(!core || (core && terminate));

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering %d (code %d) to default handler; action: %s%s\n",
                sigNo, info->si_code, terminate ? "terminate" : "ignore",
                core ? "+core" : "");

   if (!terminate)
      return;   /* nothing to do */

   could_core = core;

   if (core) {
      /* If they set the core-size limit to zero, don't generate a
         core file */

      VG_(getrlimit)(VKI_RLIMIT_CORE, &corelim);

      if (corelim.rlim_cur == 0)
         core = False;
   }
1618 if ( (VG_(clo_verbosity
) > 1 ||
1619 (could_core
&& is_signal_from_kernel(tid
, sigNo
, info
->si_code
))
1624 "Process terminating with default action of signal %d (%s)%s\n",
1625 sigNo
, VG_(signame
)(sigNo
), core
? ": dumping core" : "");
1627 /* Be helpful - decode some more details about this fault */
1628 if (is_signal_from_kernel(tid
, sigNo
, info
->si_code
)) {
1629 const HChar
*event
= NULL
;
1630 Bool haveaddr
= True
;
1634 switch(info
->si_code
) {
1635 case VKI_SEGV_MAPERR
: event
= "Access not within mapped region";
1637 case VKI_SEGV_ACCERR
: event
= "Bad permissions for mapped region";
1639 case VKI_SEGV_MADE_UP_GPF
:
1640 /* General Protection Fault: The CPU/kernel
1641 isn't telling us anything useful, but this
1642 is commonly the result of exceeding a
1644 event
= "General Protection Fault";
1651 VG_(am_show_nsegments
)(0,"post segfault");
1652 VG_(sprintf
)(buf
, "/bin/cat /proc/%d/maps", VG_(getpid
)());
1659 switch(info
->si_code
) {
1660 case VKI_ILL_ILLOPC
: event
= "Illegal opcode"; break;
1661 case VKI_ILL_ILLOPN
: event
= "Illegal operand"; break;
1662 case VKI_ILL_ILLADR
: event
= "Illegal addressing mode"; break;
1663 case VKI_ILL_ILLTRP
: event
= "Illegal trap"; break;
1664 case VKI_ILL_PRVOPC
: event
= "Privileged opcode"; break;
1665 case VKI_ILL_PRVREG
: event
= "Privileged register"; break;
1666 case VKI_ILL_COPROC
: event
= "Coprocessor error"; break;
1667 case VKI_ILL_BADSTK
: event
= "Internal stack error"; break;
1672 switch (info
->si_code
) {
1673 case VKI_FPE_INTDIV
: event
= "Integer divide by zero"; break;
1674 case VKI_FPE_INTOVF
: event
= "Integer overflow"; break;
1675 case VKI_FPE_FLTDIV
: event
= "FP divide by zero"; break;
1676 case VKI_FPE_FLTOVF
: event
= "FP overflow"; break;
1677 case VKI_FPE_FLTUND
: event
= "FP underflow"; break;
1678 case VKI_FPE_FLTRES
: event
= "FP inexact"; break;
1679 case VKI_FPE_FLTINV
: event
= "FP invalid operation"; break;
1680 case VKI_FPE_FLTSUB
: event
= "FP subscript out of range"; break;
1685 switch (info
->si_code
) {
1686 case VKI_BUS_ADRALN
: event
= "Invalid address alignment"; break;
1687 case VKI_BUS_ADRERR
: event
= "Non-existent physical address"; break;
1688 case VKI_BUS_OBJERR
: event
= "Hardware error"; break;
1691 } /* switch (sigNo) */
1693 if (event
!= NULL
) {
1695 VG_(umsg
)(" %s at address %p\n",
1696 event
, info
->VKI_SIGINFO_si_addr
);
1698 VG_(umsg
)(" %s\n", event
);
      /* Print a stack trace.  Be cautious if the thread's SP is in an
         obviously stupid place (not mapped readable) that would
         likely cause a segfault. */
      if (VG_(is_valid_tid)(tid)) {
         Word first_ip_delta = 0;
#if defined(VGO_linux)
         /* Make sure that the address stored in the stack pointer is
            located in a mapped page. That is not necessarily so. E.g.
            consider the scenario where the stack pointer was decreased
            and now has a value that is just below the end of a page that has
            not been mapped yet. In that case VG_(am_is_valid_for_client)
            will consider the address of the stack pointer invalid and that
            would cause a back-trace of depth 1 to be printed, instead of a
            full back-trace. */
         if (tid == 1) {           // main thread
            Addr esp  = VG_(get_SP)(tid);
            Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
            if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
               if (VG_(clo_trace_signals))
                  VG_(dmsg)("       -> extended stack base to %#lx\n",
                            VG_PGROUNDDN(esp));
            }
         }
#endif
#if defined(VGA_s390x)
         if (sigNo == VKI_SIGILL) {
            /* The guest instruction address has been adjusted earlier to
               point to the insn following the one that could not be decoded.
               When printing the back-trace here we need to undo that
               adjustment so the first line in the back-trace reports the
               correct address. */
            Addr  addr = (Addr)info->VKI_SIGINFO_si_addr;
            UChar byte = ((UChar *)addr)[0];
            Int   insn_length = ((((byte >> 6) + 1) >> 1) + 1) << 1;

            first_ip_delta = -insn_length;
         }
#endif
         ExeContext* ec = VG_(am_is_valid_for_client)
                             (VG_(get_SP)(tid), sizeof(Addr), VKI_PROT_READ)
                        ? VG_(record_ExeContext)( tid, first_ip_delta )
                        : VG_(record_depth_1_ExeContext)( tid,
                                                          first_ip_delta );
         vg_assert(ec);
         VG_(pp_ExeContext)( ec );
      }
      if (sigNo == VKI_SIGSEGV
          && is_signal_from_kernel(tid, sigNo, info->si_code)
          && info->si_code == VKI_SEGV_MAPERR) {
         VG_(umsg)(" If you believe this happened as a result of a stack\n" );
         VG_(umsg)(" overflow in your program's main thread (unlikely but\n");
         VG_(umsg)(" possible), you can try to increase the size of the\n"  );
         VG_(umsg)(" main thread stack using the --main-stacksize= flag.\n" );
         // FIXME: assumes main ThreadId == 1
         if (VG_(is_valid_tid)(1)) {
            VG_(umsg)(
               " The main thread stack size used in this run was %lu.\n",
               VG_(threads)[1].client_stack_szB);
         }
      }
   }
   if (VG_(clo_vgdb) != Vg_VgdbNo
       && VG_(dyn_vgdb_error) <= VG_(get_n_errs_shown)() + 1) {
      /* Note: we add + 1 to n_errs_shown as the fatal signal was not
         reported through error msg, and so was not counted. */
      VG_(gdbserver_report_fatal_signal) (sigNo, tid);
   }

   if (VG_(is_action_requested)( "Attach to debugger", & VG_(clo_db_attach) )) {
      VG_(start_debugger)( tid );
   }
   if (core) {
      const static struct vki_rlimit zero = { 0, 0 };

      VG_(make_coredump)(tid, info, corelim.rlim_cur);

      /* Make sure we don't get a confusing kernel-generated
         coredump when we finally exit */
      VG_(setrlimit)(VKI_RLIMIT_CORE, &zero);
   }
   /* stash fatal signal in main thread */
   //VG_(threads)[VG_(master_tid)].os_state.fatalsig = sigNo;

   /* everyone dies */
   VG_(nuke_all_threads_except)(tid, VgSrc_FatalSig);
   VG_(threads)[tid].exitreason = VgSrc_FatalSig;
   VG_(threads)[tid].os_state.fatalsig = sigNo;
}
/*
   This does the business of delivering a signal to a thread.  It may
   be called from either a real signal handler, or from normal code to
   cause the thread to enter the signal handler.

   This updates the thread state, but it does not set it to be
   Runnable.
*/
static void deliver_signal ( ThreadId tid, const vki_siginfo_t *info,
                             const struct vki_ucontext *uc )
{
   Int              sigNo    = info->si_signo;
   SCSS_Per_Signal  *handler = &scss.scss_per_sig[sigNo];
   void             *handler_fn;
   ThreadState      *tst     = VG_(get_ThreadState)(tid);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("delivering signal %d (%s):%d to thread %d\n",
                sigNo, VG_(signame)(sigNo), info->si_code, tid );

   if (sigNo == VG_SIGVGKILL) {
      /* If this is a SIGVGKILL, we're expecting it to interrupt any
         blocked syscall.  It doesn't matter whether the VCPU state is
         set to restart or not, because we don't expect it will
         execute any more client instructions. */
      vg_assert(VG_(is_exiting)(tid));
      return;
   }
   /* If the client specifies SIG_IGN, treat it as SIG_DFL.

      If deliver_signal() is being called on a thread, we want
      the signal to get through no matter what; if they're ignoring
      it, then we do this override (this is so we can send it SIGSEGV,
      etc). */
   handler_fn = handler->scss_handler;
   if (handler_fn == VKI_SIG_IGN)
      handler_fn = VKI_SIG_DFL;

   vg_assert(handler_fn != VKI_SIG_IGN);

   if (handler_fn == VKI_SIG_DFL) {
      default_action(info, tid);
   } else {
      /* Create a signal delivery frame, and set the client's %ESP and
         %EIP so that when execution continues, we will enter the
         signal handler with the frame on top of the client's stack,
         as it expects.

         Signal delivery can fail if the client stack is too small or
         missing, and we can't push the frame.  If that happens,
         push_signal_frame will cause the whole process to exit when
         we next hit the scheduler.
      */
      vg_assert(VG_(is_valid_tid)(tid));

      push_signal_frame ( tid, info, uc );

      if (handler->scss_flags & VKI_SA_ONESHOT) {
         /* Do the ONESHOT thing. */
         handler->scss_handler = VKI_SIG_DFL;

         handle_SCSS_change( False /* lazy update */ );
      }
      /* At this point:
         tst->sig_mask is the current signal mask
         tst->tmp_sig_mask is the same as sig_mask, unless we're in sigsuspend
         handler->scss_mask is the mask set by the handler

         Handler gets a mask of tmp_sig_mask|handler_mask|signo
       */
      tst->sig_mask = tst->tmp_sig_mask;
      if (!(handler->scss_flags & VKI_SA_NOMASK)) {
         VG_(sigaddset_from_set)(&tst->sig_mask, &handler->scss_mask);
         VG_(sigaddset)(&tst->sig_mask, sigNo);
         tst->tmp_sig_mask = tst->sig_mask;
      }
   }

   /* Thread state is ready to go - just add Runnable */
}
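/* Worked example (illustrative, not part of the original source): suppose
   SIGUSR1 is being delivered, the handler was registered with
   sa_mask = {SIGUSR2} and without SA_NOMASK/SA_NODEFER, and
   tmp_sig_mask = {SIGCHLD}.  The handler then runs with
   tmp_sig_mask | scss_mask | {signo} = {SIGCHLD, SIGUSR2, SIGUSR1}.
   Had SA_NOMASK been set, the mask would stay {SIGCHLD}, so the handler
   could be re-entered by another SIGUSR1. */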
static void resume_scheduler(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);

   vg_assert(tst->os_state.lwpid == VG_(gettid)());

   if (tst->sched_jmpbuf_valid) {
      /* Can't continue; must longjmp back to the scheduler and thus
         enter the sighandler immediately. */
      VG_MINIMAL_LONGJMP(tst->sched_jmpbuf);
   }
}
static void synth_fault_common(ThreadId tid, Addr addr, Int si_code)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGSEGV;
   info.si_code  = si_code;
   info.VKI_SIGINFO_si_addr = (void*)addr;

   /* Even if gdbserver indicates to ignore the signal, we must deliver it.
      So ignore the return value of VG_(gdbserver_report_signal). */
   (void) VG_(gdbserver_report_signal) (VKI_SIGSEGV, tid);

   /* If they're trying to block the signal, force it to be delivered */
   if (VG_(sigismember)(&VG_(threads)[tid].sig_mask, VKI_SIGSEGV))
      VG_(set_default_handler)(VKI_SIGSEGV);

   deliver_signal(tid, &info, NULL);
}
// Synthesize a fault where the address is OK, but the page
// permissions are bad.
void VG_(synth_fault_perms)(ThreadId tid, Addr addr)
{
   synth_fault_common(tid, addr, VKI_SEGV_ACCERR);
}

// Synthesize a fault where there's nothing mapped at the address.
void VG_(synth_fault_mapping)(ThreadId tid, Addr addr)
{
   synth_fault_common(tid, addr, VKI_SEGV_MAPERR);
}

// Synthesize a misc memory fault.
void VG_(synth_fault)(ThreadId tid)
{
   synth_fault_common(tid, 0, VKI_SEGV_MADE_UP_GPF);
}
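/* Usage sketch (illustrative, not part of the original source): a caller
   that has decided a client access to 'addr' should fault as an
   unmapped-address SIGSEGV, rather than a permissions one, would do:

      VG_(synth_fault_mapping)(tid, addr);

   which delivers SIGSEGV with si_code == VKI_SEGV_MAPERR and
   si_addr == addr, matching what the kernel itself would report. */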
// Synthesise a SIGILL.
void VG_(synth_sigill)(ThreadId tid, Addr addr)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGILL;
   info.si_code  = VKI_ILL_ILLOPC; /* jrs: no idea what this should be */
   info.VKI_SIGINFO_si_addr = (void*)addr;

   if (VG_(gdbserver_report_signal) (VKI_SIGILL, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, NULL);
   }
   else
      resume_scheduler(tid);
}
// Synthesise a SIGBUS.
void VG_(synth_sigbus)(ThreadId tid)
{
   vki_siginfo_t info;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   info.si_signo = VKI_SIGBUS;
   /* There are several meanings to SIGBUS (as per POSIX, presumably),
      but the most widely understood is "invalid address alignment",
      so let's use that. */
   info.si_code = VKI_BUS_ADRALN;
   /* If we knew the invalid address in question, we could put it
      in .si_addr.  Oh well. */
   /* info.VKI_SIGINFO_si_addr = (void*)addr; */

   if (VG_(gdbserver_report_signal) (VKI_SIGBUS, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, NULL);
   }
   else
      resume_scheduler(tid);
}
// Synthesise a SIGTRAP.
void VG_(synth_sigtrap)(ThreadId tid)
{
   vki_siginfo_t info;
   struct vki_ucontext uc;
#  if defined(VGP_x86_darwin)
   struct __darwin_mcontext32 mc;
#  elif defined(VGP_amd64_darwin)
   struct __darwin_mcontext64 mc;
#  endif

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   VG_(memset)(&uc,   0, sizeof(uc));
   info.si_signo = VKI_SIGTRAP;
   info.si_code = VKI_TRAP_BRKPT; /* tjh: only ever called for a brkpt ins */

#  if defined(VGP_x86_linux) || defined(VGP_amd64_linux)
   uc.uc_mcontext.trapno = 3;     /* tjh: this is the x86 trap number
                                          for a breakpoint trap... */
   uc.uc_mcontext.err = 0;        /* tjh: no error code for x86
                                          breakpoint trap... */
#  elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
   /* the same thing, but using Darwin field/struct names */
   VG_(memset)(&mc, 0, sizeof(mc));
   uc.uc_mcontext = &mc;
   uc.uc_mcontext->__es.__trapno = 3;
   uc.uc_mcontext->__es.__err = 0;
#  endif

   /* fixs390: do we need to do anything here for s390 ? */
   if (VG_(gdbserver_report_signal) (VKI_SIGTRAP, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, &uc);
   }
   else
      resume_scheduler(tid);
}
// Synthesise a SIGFPE.
void VG_(synth_sigfpe)(ThreadId tid, UInt code)
{
// Only tested on mips32 and mips64
#if !defined(VGA_mips32) && !defined(VGA_mips64)
   vg_assert(0);
#else
   vki_siginfo_t info;
   struct vki_ucontext uc;

   vg_assert(VG_(threads)[tid].status == VgTs_Runnable);

   VG_(memset)(&info, 0, sizeof(info));
   VG_(memset)(&uc,   0, sizeof(uc));
   info.si_signo = VKI_SIGFPE;
   info.si_code = code;

   if (VG_(gdbserver_report_signal) (VKI_SIGFPE, tid)) {
      resume_scheduler(tid);
      deliver_signal(tid, &info, &uc);
   }
   else
      resume_scheduler(tid);
#endif
}
/* Make a signal pending for a thread, for later delivery.
   VG_(poll_signals) will arrange for it to be delivered at the right
   time.

   tid==0 means add it to the process-wide queue, and don't send it to a
   specific thread.
*/
static
void queue_signal(ThreadId tid, const vki_siginfo_t *si)
{
   ThreadState *tst;
   SigQueue *sq;
   vki_sigset_t savedmask;

   tst = VG_(get_ThreadState)(tid);

   /* Protect the signal queue against async deliveries */
   block_all_host_signals(&savedmask);

   if (tst->sig_queue == NULL) {
      tst->sig_queue = VG_(malloc)("signals.qs.1", sizeof(*tst->sig_queue));
      VG_(memset)(tst->sig_queue, 0, sizeof(*tst->sig_queue));
   }
   sq = tst->sig_queue;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Queueing signal %d (idx %d) to thread %d\n",
                si->si_signo, sq->next, tid);

   /* Add signal to the queue.  If the queue gets overrun, then old
      queued signals may get lost.

      XXX We should also keep a sigset of pending signals, so that at
      least a non-siginfo signal gets delivered.
   */
   if (sq->sigs[sq->next].si_signo != 0)
      VG_(umsg)("Signal %d being dropped from thread %d's queue\n",
                sq->sigs[sq->next].si_signo, tid);

   sq->sigs[sq->next] = *si;
   sq->next = (sq->next+1) % N_QUEUED_SIGNALS;

   restore_all_host_signals(&savedmask);
}
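/* Illustrative note (not part of the original source): the queue is a
   small ring buffer of N_QUEUED_SIGNALS siginfo slots.  With, say, 8
   slots, queueing a 9th signal before any are delivered overwrites the
   oldest undelivered entry, and the VG_(umsg) warning above reports the
   drop.  Delivery frees a slot by setting its si_signo back to 0. */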
/*
   Returns the next queued signal for thread tid which is in "set".
   tid==0 means process-wide signal.  Set si_signo to 0 when the
   signal has been delivered.

   Must be called with all signals blocked, to protect against async
   deliveries.
*/
static vki_siginfo_t *next_queued(ThreadId tid, const vki_sigset_t *set)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   SigQueue *sq;
   Int idx;
   vki_siginfo_t *ret = NULL;

   sq = tst->sig_queue;
   if (sq == NULL)
      goto out;

   idx = sq->next;
   do {
      if (0)
         VG_(printf)("idx=%d si_signo=%d inset=%d\n", idx,
                     sq->sigs[idx].si_signo,
                     VG_(sigismember)(set, sq->sigs[idx].si_signo));

      if (sq->sigs[idx].si_signo != 0
          && VG_(sigismember)(set, sq->sigs[idx].si_signo)) {
         if (VG_(clo_trace_signals))
            VG_(dmsg)("Returning queued signal %d (idx %d) for thread %d\n",
                      sq->sigs[idx].si_signo, idx, tid);
         ret = &sq->sigs[idx];
         goto out;
      }

      idx = (idx + 1) % N_QUEUED_SIGNALS;
   } while(idx != sq->next);
  out:
   return ret;
}
static int sanitize_si_code(int si_code)
{
#if defined(VGO_linux)
   /* The linux kernel uses the top 16 bits of si_code for its own
      use and only exports the bottom 16 bits to user space - at least
      that is the theory, but it turns out that there are some kernels
      around that forget to mask out the top 16 bits so we do it here.

      The kernel treats the bottom 16 bits as signed and (when it does
      mask them off) sign extends them when exporting to user space so
      we do the same thing here. */
   return (Short)si_code;
#elif defined(VGO_darwin)
   return si_code;
#else
#  error Unknown OS
#endif
}
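/* Worked example (illustrative, not part of the original source): if a
   buggy kernel leaves junk in the top 16 bits and reports
   si_code = 0x12340001, truncating to Short keeps 0x0001 == 1.  Sign
   extension matters for negative codes: SI_TKILL is -6, which arrives
   as 0xfffa in the bottom 16 bits, and (Short)0xfffa == -6 again. */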
/*
   Receive an async signal from the kernel.

   This should only happen when the thread is blocked in a syscall,
   since that's the only time this set of signals is unblocked.
*/
static
void async_signalhandler ( Int sigNo,
                           vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadState* tst = VG_(get_ThreadState)(tid);
   SysRes       sres;

   /* The thread isn't currently running, make it so before going on */
   vg_assert(tst->status == VgTs_WaitSys);
   VG_(acquire_BigLock)(tid, "async_signalhandler");

   info->si_code = sanitize_si_code(info->si_code);

   if (VG_(clo_trace_signals))
      VG_(dmsg)("async signal handler: signal=%d, tid=%d, si_code=%d\n",
                sigNo, tid, info->si_code);

   /* Update thread state properly.  The signal can only have been
      delivered whilst we were in
      coregrind/m_syswrap/syscall-<PLAT>.S, and only then in the
      window between the two sigprocmask calls, since at all other
      times, we run with async signals on the host blocked.  Hence
      make enquiries on the basis that we were in or very close to a
      syscall, and attempt to fix up the guest state accordingly.

      (normal async signals occurring during computation are blocked,
      but periodically polled for using VG_(sigtimedwait_zero), and
      delivered at a point convenient for us.  Hence this routine only
      deals with signals that are delivered to a thread during a
      syscall.) */

   /* First, extract a SysRes from the ucontext_t* given to this
      handler.  If it is subsequently established by
      VG_(fixup_guest_state_after_syscall_interrupted) that the
      syscall was complete but the results had not been committed yet
      to the guest state, then it'll have to commit the results itself
      "by hand", and so we need to extract the SysRes.  Of course if
      the thread was not in that particular window then the
      SysRes will be meaningless, but that's OK too because
      VG_(fixup_guest_state_after_syscall_interrupted) will detect
      that the thread was not in said window and ignore the SysRes. */

   /* To make matters more complex still, on Darwin we need to know
      the "class" of the syscall under consideration in order to be
      able to extract a correct SysRes.  The class will have been
      saved just before the syscall, by VG_(client_syscall), into this
      thread's tst->arch.vex.guest_SC_CLASS.  Hence: */
#  if defined(VGO_darwin)
   sres = VG_UCONTEXT_SYSCALL_SYSRES(uc, tst->arch.vex.guest_SC_CLASS);
#  else
   sres = VG_UCONTEXT_SYSCALL_SYSRES(uc);
#  endif

   /* (1) */
   VG_(fixup_guest_state_after_syscall_interrupted)(
      tid,
      VG_UCONTEXT_INSTR_PTR(uc),
      sres,
      !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
   );

   /* (2) */
   /* Set up the thread's state to deliver a signal */
   if (!is_sig_ign(info->si_signo, tid))
      deliver_signal(tid, info, uc);

   /* It's crucial that (1) and (2) happen in the order (1) then (2)
      and not the other way around.  (1) fixes up the guest thread
      state to reflect the fact that the syscall was interrupted --
      either to restart the syscall or to return EINTR.  (2) then sets
      up the thread state to deliver the signal.  Then we resume
      execution.  First, the signal handler is run, since that's the
      second adjustment we made to the thread state.  If that returns,
      then we resume at the guest state created by (1), viz, either
      the syscall returns EINTR or is restarted.

      If (2) was done before (1) the outcome would be completely
      different, and wrong. */

   /* longjmp back to the thread's main loop to start executing the
      handler. */
   resume_scheduler(tid);

   VG_(core_panic)("async_signalhandler: got unexpected signal "
                   "while outside of scheduler");
}
/* Extend the stack to cover addr.  maxsize is the limit the stack can grow to.

   Returns True on success, False on failure.

   Succeeds without doing anything if addr is already within a segment.

   Failure could be caused by:
   - addr not below a growable segment
   - new stack size would exceed maxsize
   - mmap failed for some other reason
*/
Bool VG_(extend_stack)(Addr addr, UInt maxsize)
{
   SizeT udelta;

   /* Find the next Segment above addr */
   NSegment const* seg
      = VG_(am_find_nsegment)(addr);
   NSegment const* seg_next
      = seg ? VG_(am_next_nsegment)( seg, True/*fwds*/ )
            : NULL;

   if (seg && seg->kind == SkAnonC)
      /* addr is already mapped.  Nothing to do. */
      return True;

   /* Check that the requested new base is in a shrink-down
      reservation section which abuts an anonymous mapping that
      belongs to the client. */
   if ( ! (seg
           && seg->kind == SkResvn
           && seg->smode == SmUpper
           && seg_next
           && seg_next->kind == SkAnonC
           && seg->end+1 == seg_next->start))
      return False;

   udelta = VG_PGROUNDUP(seg_next->start - addr);
   VG_(debugLog)(1, "signals",
                    "extending a stack base 0x%llx down by %lld\n",
                    (ULong)seg_next->start, (ULong)udelta);
   if (! VG_(am_extend_into_adjacent_reservation_client)
          ( seg_next, -(SSizeT)udelta )) {
      VG_(debugLog)(1, "signals", "extending a stack base: FAILED\n");
      return False;
   }

   /* When we change the main stack, we have to let the stack handling
      code know about it. */
   VG_(change_stack)(VG_(clstk_id), addr, VG_(clstk_end));

   if (VG_(clo_sanity_level) > 2)
      VG_(sanity_check_general)(False);

   return True;
}
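/* Illustrative walk-through (not part of the original source; addresses
   are made up): suppose the client's stack mapping (SkAnonC) starts at
   0x6000000, with a shrink-down reservation (SkResvn/SmUpper) ending at
   0x5ffffff immediately below it.  A call VG_(extend_stack)(0x5fff000, max)
   computes udelta = VG_PGROUNDUP(0x6000000 - 0x5fff000) = 0x1000 and moves
   that one page from the reservation into the client mapping, mimicking
   the kernel's automatic stack growth. */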
static void (*fault_catcher)(Int sig, Addr addr) = NULL;

void VG_(set_fault_catcher)(void (*catcher)(Int, Addr))
{
   if (0)
      VG_(debugLog)(0, "signals", "set fault catcher to %p\n", catcher);
   vg_assert2(NULL == catcher || NULL == fault_catcher,
              "Fault catcher is already registered");

   fault_catcher = catcher;
}
static
void sync_signalhandler_from_user ( ThreadId tid,
         Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId qtid;

   /* If some user-process sent us a sync signal (ie. it's not the result
      of a faulting instruction), then how we treat it depends on when it
      arrives. */

   if (VG_(threads)[tid].status == VgTs_WaitSys) {
      /* Signal arrived while we're blocked in a syscall.  This means that
         the client's signal mask was applied; in other words, we can't
         get here unless the client wants this signal right now.  This means
         we can simply use the async_signalhandler. */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Delivering user-sent sync signal %d as async signal\n",
                   sigNo);

      async_signalhandler(sigNo, info, uc);
      VG_(core_panic)("async_signalhandler returned!?\n");

   } else {
      /* Signal arrived while in generated client code, or while running
         Valgrind core code.  That means that every thread has these signals
         unblocked, so we can't rely on the kernel to route them properly, so
         we need to queue them manually. */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Routing user-sent sync signal %d via queue\n", sigNo);

#     if defined(VGO_linux)
      /* On Linux, first we have to do a sanity check of the siginfo. */
      if (info->VKI_SIGINFO_si_pid == 0) {
         /* There's a per-user limit of pending siginfo signals.  If
            you exceed this, by having more than that number of
            pending signals with siginfo, then new signals are
            delivered without siginfo.  This condition can be caused
            by any unrelated program you're running at the same time
            as Valgrind, if it has a large number of pending siginfo
            signals which it isn't taking delivery of.

            Since we depend on siginfo to work out why we were sent a
            signal and what we should do about it, we really can't
            continue unless we get it. */
         VG_(umsg)("Signal %d (%s) appears to have lost its siginfo; "
                   "I can't go on.\n", sigNo, VG_(signame)(sigNo));
         VG_(printf)(
" This may be because one of your programs has consumed your ration of\n"
" siginfo structures.  For more information, see:\n"
"   http://kerneltrap.org/mailarchive/1/message/25599/thread\n"
" Basically, some program on your system is building up a large queue of\n"
" pending signals, and this causes the siginfo data for other signals to\n"
" be dropped because it's exceeding a system limit.  However, Valgrind\n"
" absolutely needs siginfo for SIGSEGV.  A workaround is to track down the\n"
" offending program and avoid running it while using Valgrind, but there\n"
" is no easy way to do this.  Apparently the problem was fixed in kernel\n"
" 2.6.12.\n");

         /* It's a fatal signal, so we force the default handler. */
         VG_(set_default_handler)(sigNo);
         deliver_signal(tid, info, uc);
         resume_scheduler(tid);
         VG_(exit)(99);       /* If we can't resume, then just exit */
      }
#     endif

      qtid = 0;         /* shared pending by default */
#     if defined(VGO_linux)
      if (info->si_code == VKI_SI_TKILL)
         qtid = tid;    /* directed to us specifically */
#     endif
      queue_signal(qtid, info);
   }
}
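/* Illustrative note (not part of the original source): a signal sent
   with tkill()/tgkill() (as pthread_kill does) arrives with
   si_code == SI_TKILL and is queued against the receiving thread
   (qtid == tid); one sent with plain kill() is process-directed and
   goes on the shared queue (qtid == 0), so VG_(poll_signals) can hand
   it to whichever thread next polls with it unblocked. */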
/* Returns the reported fault address for an exact address */
static Addr fault_mask(Addr in)
{
   /* We have to use VG_PGROUNDDN because faults on s390x only deliver
      the page address but not the address within a page.
    */
#  if defined(VGA_s390x)
   return VG_PGROUNDDN(in);
#  else
   return in;
#  endif
}
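/* Example (illustrative, not part of the original source): on s390x the
   kernel reports a fault at, say, 0x80001234 as the page address
   0x80001000.  extend_stack_if_appropriate (below) compares
   fault >= fault_mask(esp - VG_STACK_REDZONE_SZB), so rounding the
   right-hand side down to its page keeps the comparison meaningful
   against the page-granular fault address. */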
/* Returns True if the sync signal was due to the stack requiring extension
   and the extension was successful.
*/
static Bool extend_stack_if_appropriate(ThreadId tid, vki_siginfo_t* info)
{
   Addr fault;
   Addr esp;
   NSegment const* seg;
   NSegment const* seg_next;

   if (info->si_signo != VKI_SIGSEGV)
      return False;

   fault    = (Addr)info->VKI_SIGINFO_si_addr;
   esp      = VG_(get_SP)(tid);
   seg      = VG_(am_find_nsegment)(fault);
   seg_next = seg ? VG_(am_next_nsegment)( seg, True/*fwds*/ )
                  : NULL;

   if (VG_(clo_trace_signals)) {
      if (seg == NULL)
         VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
                   "seg=NULL\n",
                   info->si_code, fault, tid, esp);
      else
         VG_(dmsg)("SIGSEGV: si_code=%d faultaddr=%#lx tid=%d ESP=%#lx "
                   "seg=%#lx-%#lx\n",
                   info->si_code, fault, tid, esp, seg->start, seg->end);
   }

   if (info->si_code == VKI_SEGV_MAPERR
       && seg
       && seg->kind == SkResvn
       && seg->smode == SmUpper
       && seg_next
       && seg_next->kind == SkAnonC
       && seg->end+1 == seg_next->start
       && fault >= fault_mask(esp - VG_STACK_REDZONE_SZB)) {
      /* If the fault address is above esp but below the current known
         stack segment base, and it was a fault because there was
         nothing mapped there (as opposed to a permissions fault),
         then extend the stack segment.
       */
      Addr base = VG_PGROUNDDN(esp - VG_STACK_REDZONE_SZB);
      if (VG_(extend_stack)(base, VG_(threads)[tid].client_stack_szB)) {
         if (VG_(clo_trace_signals))
            VG_(dmsg)("       -> extended stack base to %#lx\n",
                      VG_PGROUNDDN(fault));
         return True;
      } else {
         VG_(umsg)("Stack overflow in thread %d: can't grow stack to %#lx\n",
                   tid, fault);
         return False;
      }
   } else {
      return False;
   }
}
static
void sync_signalhandler_from_kernel ( ThreadId tid,
         Int sigNo, vki_siginfo_t *info, struct vki_ucontext *uc )
{
   /* Check to see if some part of Valgrind itself is interested in faults.
      The fault catcher should never be set whilst we're in generated code, so
      check for that.  AFAIK the only use of the catcher right now is
      memcheck's leak detector. */
   if (fault_catcher) {
      vg_assert(VG_(in_generated_code) == False);

      (*fault_catcher)(sigNo, (Addr)info->VKI_SIGINFO_si_addr);
      /* If the catcher returns, then it didn't handle the fault,
         so carry on panicking. */
   }

   if (extend_stack_if_appropriate(tid, info)) {
      /* Stack extension occurred, so we don't need to do anything else; upon
         returning from this function, we'll restart the host (hence guest)
         instruction. */
   } else {
      /* OK, this is a signal we really have to deal with.  If it came
         from the client's code, then we can jump back into the scheduler
         and have it delivered.  Otherwise it's a Valgrind bug. */
      ThreadState *tst = VG_(get_ThreadState)(tid);

      if (VG_(sigismember)(&tst->sig_mask, sigNo)) {
         /* signal is blocked, but they're not allowed to block faults */
         VG_(set_default_handler)(sigNo);
      }

      if (VG_(in_generated_code)) {
         if (VG_(gdbserver_report_signal) (sigNo, tid)
             || VG_(sigismember)(&tst->sig_mask, sigNo)) {
            /* Can't continue; must longjmp back to the scheduler and thus
               enter the sighandler immediately. */
            deliver_signal(tid, info, uc);
            resume_scheduler(tid);
         }
         else
            resume_scheduler(tid);
      }

      /* If resume_scheduler returns or it's our fault, it means we
         don't have longjmp set up, implying that we weren't running
         client code, and therefore it was actually generated by
         Valgrind internally.
       */
      VG_(dmsg)("VALGRIND INTERNAL ERROR: Valgrind received "
                "a signal %d (%s) - exiting\n",
                sigNo, VG_(signame)(sigNo));

      VG_(dmsg)("si_code=%x;  Faulting address: %p;  sp: %#lx\n",
                info->si_code, info->VKI_SIGINFO_si_addr,
                VG_UCONTEXT_STACK_PTR(uc));

      if (0)
         VG_(kill_self)(sigNo);  /* generate a core dump */

      //if (tid == 0)            /* could happen after everyone has exited */
      //  tid = VG_(master_tid);
      vg_assert(tid != 0);

      UnwindStartRegs startRegs;
      VG_(memset)(&startRegs, 0, sizeof(startRegs));

      VG_UCONTEXT_TO_UnwindStartRegs(&startRegs, uc);
      VG_(core_panic_at)("Killed by fatal signal", &startRegs);
   }
}
/*
   Receive a sync signal from the host.
*/
static
void sync_signalhandler ( Int sigNo,
                          vki_siginfo_t *info, struct vki_ucontext *uc )
{
   ThreadId tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   Bool from_user;

   if (0)
      VG_(printf)("sync_sighandler(%d, %p, %p)\n", sigNo, info, uc);

   vg_assert(info != NULL);
   vg_assert(info->si_signo == sigNo);
   vg_assert(sigNo == VKI_SIGSEGV ||
             sigNo == VKI_SIGBUS  ||
             sigNo == VKI_SIGFPE  ||
             sigNo == VKI_SIGILL  ||
             sigNo == VKI_SIGTRAP);

   info->si_code = sanitize_si_code(info->si_code);

   from_user = !is_signal_from_kernel(tid, sigNo, info->si_code);

   if (VG_(clo_trace_signals)) {
      VG_(dmsg)("sync signal handler: "
                "signal=%d, si_code=%d, EIP=%#lx, eip=%#lx, from %s\n",
                sigNo, info->si_code, VG_(get_IP)(tid),
                VG_UCONTEXT_INSTR_PTR(uc),
                ( from_user ? "user" : "kernel" ));
   }
   vg_assert(sigNo >= 1 && sigNo <= VG_(max_signal));

   /* // debug code:
   if (0) {
      VG_(printf)("info->si_signo  %d\n", info->si_signo);
      VG_(printf)("info->si_errno  %d\n", info->si_errno);
      VG_(printf)("info->si_code   %d\n", info->si_code);
      VG_(printf)("info->si_pid    %d\n", info->si_pid);
      VG_(printf)("info->si_uid    %d\n", info->si_uid);
      VG_(printf)("info->si_status %d\n", info->si_status);
      VG_(printf)("info->si_addr   %p\n", info->si_addr);
   }
   */

   /* Figure out if the signal is being sent from outside the process.
      (Why do we care?)  If the signal is from the user rather than the
      kernel, then treat it more like an async signal than a sync signal --
      that is, merely queue it for later delivery. */
   if (from_user) {
      sync_signalhandler_from_user(  tid, sigNo, info, uc);
   } else {
      sync_signalhandler_from_kernel(tid, sigNo, info, uc);
   }
}
/*
   Kill this thread.  Makes it leave any syscall it might be currently
   blocked in, and return to the scheduler.  This doesn't mark the thread
   as exiting; that's the caller's job.
 */
static void sigvgkill_handler(int signo, vki_siginfo_t *si,
                              struct vki_ucontext *uc)
{
   ThreadId     tid = VG_(lwpid_to_vgtid)(VG_(gettid)());
   ThreadStatus at_signal = VG_(threads)[tid].status;

   if (VG_(clo_trace_signals))
      VG_(dmsg)("sigvgkill for lwp %d tid %d\n", VG_(gettid)(), tid);

   VG_(acquire_BigLock)(tid, "sigvgkill_handler");

   vg_assert(signo == VG_SIGVGKILL);
   vg_assert(si->si_signo == signo);

   /* jrs 2006 August 3: the following assertion seems incorrect to
      me, and fails on AIX.  sigvgkill could be sent to a thread which
      is runnable - see VG_(nuke_all_threads_except) in the scheduler.
      Hence comment these out ..

         vg_assert(VG_(threads)[tid].status == VgTs_WaitSys);
         VG_(post_syscall)(tid);

      .. and instead do:
   */
   if (at_signal == VgTs_WaitSys)
      VG_(post_syscall)(tid);
   /* jrs 2006 August 3 ends */

   resume_scheduler(tid);

   VG_(core_panic)("sigvgkill_handler couldn't return to the scheduler\n");
}
static __attribute((unused))
void pp_ksigaction ( vki_sigaction_toK_t* sa )
{
   Int i;
   VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n",
               sa->ksa_handler,
               (UInt)sa->sa_flags,
#              if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
                  sa->sa_restorer
#              else
                  (void*)0
#              endif
              );
   VG_(printf)("pp_ksigaction: { ");
   for (i = 1; i <= VG_(max_signal); i++)
      if (VG_(sigismember(&(sa->sa_mask),i)))
         VG_(printf)("%d ", i);
   VG_(printf)("}\n");
}
/*
   Force signal handler to default
 */
void VG_(set_default_handler)(Int signo)
{
   vki_sigaction_toK_t sa;

   sa.ksa_handler = VKI_SIG_DFL;
   sa.sa_flags = 0;
#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
   sa.sa_restorer = 0;
#  endif
   VG_(sigemptyset)(&sa.sa_mask);

   VG_(do_sys_sigaction)(signo, &sa, NULL);
}
/*
   Poll for pending signals, and set the next one up for delivery.
 */
void VG_(poll_signals)(ThreadId tid)
{
   vki_siginfo_t si, *sip;
   vki_sigset_t pollset;
   ThreadState *tst = VG_(get_ThreadState)(tid);
   vki_sigset_t saved_mask;

   /* look for all the signals this thread isn't blocking */
   /* pollset = ~tst->sig_mask */
   VG_(sigcomplementset)( &pollset, &tst->sig_mask );

   block_all_host_signals(&saved_mask); // protect signal queue

   /* First look for any queued pending signals */
   sip = next_queued(tid, &pollset); /* this thread */
   if (sip == NULL)
      sip = next_queued(0, &pollset); /* process-wide */

   /* If there was nothing queued, ask the kernel for a pending signal */
   if (sip == NULL && VG_(sigtimedwait_zero)(&pollset, &si) > 0) {
      if (VG_(clo_trace_signals))
         VG_(dmsg)("poll_signals: got signal %d for thread %d\n",
                   si.si_signo, tid);
      sip = &si;
   }

   if (sip != NULL) {
      /* OK, something to do; deliver it */
      if (VG_(clo_trace_signals))
         VG_(dmsg)("Polling found signal %d for tid %d\n", sip->si_signo, tid);
      if (!is_sig_ign(sip->si_signo, tid))
         deliver_signal(tid, sip, NULL);
      else if (VG_(clo_trace_signals))
         VG_(dmsg)("   signal %d ignored\n", sip->si_signo);

      sip->si_signo = 0;   /* remove from signal queue, if that's
                              where it came from */
   }

   restore_all_host_signals(&saved_mask);
}
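/* Illustrative note (not part of the original source): the lookup order
   above gives thread-directed queued signals priority over process-wide
   queued ones, and only consults the kernel (via a zero-timeout
   sigtimedwait) when both queues come up empty, so a manually queued
   signal cannot be starved by a steady stream of kernel-pending ones. */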
/* At startup, copy the process' real signal state to the SCSS.
   Whilst doing this, block all real signals.  Then calculate SKSS and
   set the kernel to that.  Also initialise DCSS.
*/
void VG_(sigstartup_actions) ( void )
{
   Int i, ret, vKI_SIGRTMIN;
   vki_sigset_t saved_procmask;
   vki_sigaction_fromK_t sa;

   VG_(memset)(&scss, 0, sizeof(scss));
   VG_(memset)(&skss, 0, sizeof(skss));

#  if defined(VKI_SIGRTMIN)
   vKI_SIGRTMIN = VKI_SIGRTMIN;
#  else
   vKI_SIGRTMIN = 0; /* eg Darwin */
#  endif

   /* VG_(printf)("SIGSTARTUP\n"); */
   /* Block all signals.  saved_procmask remembers the previous mask,
      which the first thread inherits.
   */
   block_all_host_signals( &saved_procmask );

   /* Copy per-signal settings to SCSS. */
   for (i = 1; i <= _VKI_NSIG; i++) {
      /* Get the old host action */
      ret = VG_(sigaction)(i, NULL, &sa);

#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      /* apparently we may not even ask about the disposition of these
         signals, let alone change them */
      if (ret != 0 && (i == VKI_SIGKILL || i == VKI_SIGSTOP))
         continue;
#     endif

      if (ret != 0)
         break;

      /* Try setting it back to see if this signal is really
         available */
      if (vKI_SIGRTMIN > 0 /* it actually exists on this platform */
          && i >= vKI_SIGRTMIN) {
         vki_sigaction_toK_t tsa, sa2;

         tsa.ksa_handler = (void *)sync_signalhandler;
         tsa.sa_flags = VKI_SA_SIGINFO;
#        if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
         tsa.sa_restorer = 0;
#        endif
         VG_(sigfillset)(&tsa.sa_mask);

         /* try setting it to some arbitrary handler */
         if (VG_(sigaction)(i, &tsa, NULL) != 0) {
            /* failed - not really usable */
            break;
         }

         VG_(convert_sigaction_fromK_to_toK)( &sa, &sa2 );
         ret = VG_(sigaction)(i, &sa2, NULL);
         vg_assert(ret == 0);
      }

      VG_(max_signal) = i;

      if (VG_(clo_trace_signals) && VG_(clo_verbosity) > 2)
         VG_(printf)("snaffling handler 0x%lx for signal %d\n",
                     (Addr)(sa.ksa_handler), i );

      scss.scss_per_sig[i].scss_handler  = sa.ksa_handler;
      scss.scss_per_sig[i].scss_flags    = sa.sa_flags;
      scss.scss_per_sig[i].scss_mask     = sa.sa_mask;

      scss.scss_per_sig[i].scss_restorer = NULL;
#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
#     endif

      scss.scss_per_sig[i].scss_sa_tramp = NULL;
#     if defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
      scss.scss_per_sig[i].scss_sa_tramp = NULL;
      /* We can't know what it was, because Darwin's sys_sigaction
         doesn't tell us. */
#     endif
   }

   if (VG_(clo_trace_signals))
      VG_(dmsg)("Max kernel-supported signal is %d\n", VG_(max_signal));

   /* Our private internal signals are treated as ignored */
   scss.scss_per_sig[VG_SIGVGKILL].scss_handler = VKI_SIG_IGN;
   scss.scss_per_sig[VG_SIGVGKILL].scss_flags   = VKI_SA_SIGINFO;
   VG_(sigfillset)(&scss.scss_per_sig[VG_SIGVGKILL].scss_mask);

   /* Copy the process' signal mask into the root thread. */
   vg_assert(VG_(threads)[1].status == VgTs_Init);
   for (i = 2; i < VG_N_THREADS; i++)
      vg_assert(VG_(threads)[i].status == VgTs_Empty);

   VG_(threads)[1].sig_mask = saved_procmask;
   VG_(threads)[1].tmp_sig_mask = saved_procmask;

   /* Calculate SKSS and apply it.  This also sets the initial kernel
      mask we need to run with. */
   handle_SCSS_change( True /* forced update */ );

   /* Leave with all signals still blocked; the thread scheduler loop
      will set the appropriate mask at the appropriate time. */
}
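/* Illustrative note (not part of the original source): the loop above
   leaves VG_(max_signal) as the highest signal number the kernel will
   accept a sigaction() for.  For realtime signals (i >= SIGRTMIN),
   merely reading the disposition can succeed even when the signal is
   unusable, hence the extra round-trip of installing an arbitrary
   handler and then restoring the saved one before recording signal i. */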
/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/