/*--------------------------------------------------------------------*/
/*--- Support for doing system calls.      syscall-amd64-freebsd.S ---*/
/*--------------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2000-2008 Julian Seward
   Copyright (C) 2018-2021 Paul Floyd

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_core_basics_asm.h"

#if defined(VGP_amd64_freebsd)

#include "pub_core_vkiscnums_asm.h"
#include "libvex_guest_offsets.h"

/*----------------------------------------------------------------*/
/*
   Perform a syscall for the client.  This will run a syscall
   with the client's specific per-thread signal mask.

   The structure of this function is such that, if the syscall is
   interrupted by a signal, we can determine exactly what
   execution state we were in with respect to the execution of
   the syscall by examining the value of %rip in the signal
   handler.  This means that we can always do the appropriate
   thing to precisely emulate the kernel's signal/syscall
   interactions.

   The syscall number is taken from the argument, even though it
   should also be in guest_state->guest_RAX.  The syscall result
   is written back to guest_state->guest_RAX on completion.

   Returns 0 if the syscall was successfully called (even if the
   syscall itself failed), or a -ve error code if one of the
   sigprocmasks failed (there's no way to determine which one
   failed).

   VG_(fixup_guest_state_after_syscall_interrupted) does the
   thread state fixup in the case where we were interrupted by a
   signal.

   Prototype:

   Int ML_(do_syscall_for_client_WRK)(
          Int syscallno,                 // rdi
          void* guest_state,             // rsi
          const vki_sigset_t *sysmask,   // rdx
          const vki_sigset_t *postmask,  // rcx
          Int sigsetSzB)                 // r8
*/

#define VKI_SIG_SETMASK 3
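
/* Note on the FreeBSD amd64 syscall convention this file relies on:
   the kernel reports failure by setting the carry flag and leaving the
   (positive) errno value in %rax; on success the result is returned in
   %rax (and %rdx for the few two-result syscalls).  Arguments beyond
   the sixth are passed on the stack, above a return-address slot that
   the kernel ignores.  Hence the "jb" checks after each sigprocmask,
   the "setc" stash after the client's syscall, and the three words
   copied from the simulated stack below. */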

.globl ML_(do_syscall_for_client_WRK)
ML_(do_syscall_for_client_WRK):
        /* establish a frame and save the argument registers */
        pushq   %rbp
        movq    %rsp, %rbp
        pushq   %rdi            // -8(%rbp)  syscallno
        pushq   %rsi            // -16(%rbp) guest_state
        pushq   %rdx            // -24(%rbp) sysmask
        pushq   %rcx            // -32(%rbp) postmask
        pushq   %r8             // -40(%rbp) sigsetSzB

1:      /* Even though we can't take a signal until the sigprocmask completes,
           start the range early.
           If rip is in the range [1,2), the syscall hasn't been started yet. */

        /* Set the signal mask which should be current during the syscall. */
        /* Save and restore all 5 arg regs round the call.  This is easier
           than figuring out the minimal set to save/restore. */

        movq    $__NR_sigprocmask, %rax         // syscall #
        movq    $VKI_SIG_SETMASK, %rdi          // how
        movq    %rdx, %rsi                      // sysmask
        movq    %rcx, %rdx                      // postmask
        syscall

        jb      7f              /* sigprocmask failed */

        /* OK, that worked.  Now do the syscall proper. */

        /* 6 register parameters */
        movq    -16(%rbp), %r11                 /* r11 = VexGuestAMD64State * */
        movq    OFFSET_amd64_RDI(%r11), %rdi
        movq    OFFSET_amd64_RSI(%r11), %rsi
        movq    OFFSET_amd64_RDX(%r11), %rdx
        movq    OFFSET_amd64_R10(%r11), %r10
        movq    OFFSET_amd64_R8(%r11), %r8
        movq    OFFSET_amd64_R9(%r11), %r9
        /* 2 stack parameters plus return address (ignored by syscall) */
        /* @todo PJF there is a potential bug here.
         * A syscall can take up to 8 arguments, but when the indirect
         * forms syscall (number 0) or __syscall (number 198) are used,
         * one register argument is taken up by the id of the actual
         * syscall, in which case there could be 3 stack parameters.
         * However, only mmap takes 8 arguments, and only on x86.  An
         * unlikely combination, but this might break one day. */
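
        /* For reference, the client's stack at the point of the syscall
           (and the copy constructed below) looks like this:
                 0(client RSP)   return address (ignored by the kernel)
                 8(client RSP)   stack argument 1 (syscall argument 7)
                16(client RSP)   stack argument 2 (syscall argument 8)    */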
        movq    OFFSET_amd64_RSP(%r11), %r11    /* r11 = simulated RSP */
        movq    16(%r11), %rax                  /* stack arg 2 */
        pushq   %rax
        movq    8(%r11), %rax                   /* stack arg 1 */
        pushq   %rax
        /* (fake) return address. */
        movq    0(%r11), %rax
        pushq   %rax
        /* syscall number */
        movq    -8(%rbp), %rax

        /* If rip==2, then the syscall was either just about
           to start, or was interrupted and the kernel was
           restarting it. */
2:      syscall
3:      /* In the range [3, 4), the syscall result is in %rax,
           but hasn't been committed to RAX. */

        /* stack contents: 3 words for syscall above, plus our prologue */
        setc    0(%rsp)                         /* stash returned carry flag */
        movq    -16(%rbp), %r11                 /* r11 = VexGuestAMD64State * */
        movq    %rax, OFFSET_amd64_RAX(%r11)    /* save back to RAX */
        movq    %rdx, OFFSET_amd64_RDX(%r11)    /* save back to RDX */
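
        /* The guest's rflags are not held as a flat register in the guest
           state; VEX keeps them as a condition-code thunk.  So the carry
           flag returned by the kernel is folded in via the helper
           LibVEX_GuestAMD64_put_rflag_c rather than written to an offset
           directly. */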
        /* save carry flag to VEX */
        xorq    %rax, %rax
        movb    0(%rsp), %al                    /* %rax = stashed carry flag */
        movq    %rax, %rdi                      /* arg1 = new flag */
        movq    %r11, %rsi                      /* arg2 = vex state */
        addq    $24, %rsp                       /* remove syscall parameters */
        movl    $1, OFFSET_amd64_SETC(%r11)
        call    LibVEX_GuestAMD64_put_rflag_c
        movq    -16(%rbp), %r11                 /* r11 = VexGuestAMD64State * */
        movl    $0, OFFSET_amd64_SETC(%r11)

4:      /* Re-block signals.  If rip is in [4,5), then the syscall
           is complete and we needn't worry about it. */
        movq    $__NR_sigprocmask, %rax         // syscall #
        movq    $VKI_SIG_SETMASK, %rdi          // how
        movq    -32(%rbp), %rsi                 // postmask
        xorq    %rdx, %rdx                      // NULL
        syscall

        jb      7f              /* sigprocmask failed */

5:      /* now safe from signals */
        xorq    %rax, %rax                      /* return 0 = success */
        movq    %rbp, %rsp                      /* discard the saved arguments */
        popq    %rbp
        ret

7:      /* failure: return 0x8000 | error code */
        /* the carry flag is set and %rax holds the errno value */
        orq     $0x8000, %rax
        movq    %rbp, %rsp                      /* discard the saved arguments */
        popq    %rbp
        ret

/* export the ranges so that
   VG_(fixup_guest_state_after_syscall_interrupted) can do the
   right thing */

.globl ML_(blksys_setup)
.globl ML_(blksys_restart)
.globl ML_(blksys_complete)
.globl ML_(blksys_committed)
.globl ML_(blksys_finished)
ML_(blksys_setup):     .quad 1b
ML_(blksys_restart):   .quad 2b
ML_(blksys_complete):  .quad 3b
ML_(blksys_committed): .quad 4b
ML_(blksys_finished):  .quad 5b
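
/* Summary, as used by VG_(fixup_guest_state_after_syscall_interrupted):
     [blksys_setup,     blksys_restart)   signal mask being set, syscall not
                                          yet started
     [blksys_restart,   blksys_complete)  syscall started; may need restarting
     [blksys_complete,  blksys_committed) result in %rax, not yet committed
                                          to guest RAX
     [blksys_committed, blksys_finished)  result committed; signal mask not
                                          yet re-blocked
     blksys_finished onwards              safely past the syscall            */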

#endif /* defined(VGP_amd64_freebsd) */

/* Let the linker know we don't need an executable stack */
MARK_STACK_NO_EXEC

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/