/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.    syswrap-amd64-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2014-2017 Petr Pavlu

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_amd64_solaris)
#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_debuglog.h"
#include "pub_core_vki.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_tooliface.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"

/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* %rdi */
                                Addr retaddr,           /* %rsi */
                                void (*f)(Word),        /* %rdx */
                                Word arg1);             /* %rcx */
__asm__(
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movq  %rdi, %rsp\n"     /* set stack */
"   movq  %rcx, %rdi\n"     /* set arg1 */
"   pushq %rsi\n"           /* retaddr to stack */
"   pushq %rdx\n"           /* f to stack */
"   movq  $0, %rax\n"       /* zero all GP regs (except %rdi) */
"   movq  $0, %rbx\n"
"   movq  $0, %rcx\n"
"   movq  $0, %rdx\n"
"   movq  $0, %rsi\n"
"   movq  $0, %rbp\n"
"   movq  $0, %r8\n"
"   movq  $0, %r9\n"
"   movq  $0, %r10\n"
"   movq  $0, %r11\n"
"   movq  $0, %r12\n"
"   movq  $0, %r13\n"
"   movq  $0, %r14\n"
"   movq  $0, %r15\n"
"   ret\n"                  /* jump to f */
"   ud2\n"                  /* should never get here */
);

/* This function is called to set up a context of a new Valgrind thread (which
   will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack; it should always be 16-byte aligned before doing
      a function call, i.e. the first parameter is also 16-byte aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack[0] = 0; /* bogus return value */

   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_REG_RDI] = (UWord)tst; /* the parameter */
   uc->uc_mcontext.gregs[VKI_REG_RIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_REG_RSP] = (UWord)stack;
}
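
/* In effect, once this context is made live the new thread starts as if
   ML_(start_thread_NORETURN) had been called with 'tst' as its single
   argument: %rdi carries the first integer parameter in the AMD64 calling
   convention, and %rip/%rsp select the entry point and the prepared stack. */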

/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* Common registers */
   uc->uc_mcontext.gregs[VKI_REG_RIP] = tst->arch.vex.guest_RIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RAX] = tst->arch.vex.guest_RAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RBX] = tst->arch.vex.guest_RBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RCX] = tst->arch.vex.guest_RCX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RCX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RDX] = tst->arch.vex.guest_RDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RBP] = tst->arch.vex.guest_RBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RSI] = tst->arch.vex.guest_RSI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSI,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RDI] = tst->arch.vex.guest_RDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R8] = tst->arch.vex.guest_R8;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R8,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R9] = tst->arch.vex.guest_R9;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R9,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R10] = tst->arch.vex.guest_R10;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R10,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R11] = tst->arch.vex.guest_R11;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R11,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R12] = tst->arch.vex.guest_R12;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R12,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R13] = tst->arch.vex.guest_R13;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R13,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R14] = tst->arch.vex.guest_R14;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R14,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_R15] = tst->arch.vex.guest_R15;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R15,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_RSP] = tst->arch.vex.guest_RSP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSP,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], sizeof(UWord));

   uc->uc_mcontext.gregs[VKI_REG_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ERR], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_TRAPNO], sizeof(UWord));

   /* Segment registers */
   /* Valgrind does not support moves from/to segment registers on AMD64.  The
      values returned below are the ones that are set by the kernel when
      a program is started. */
   uc->uc_mcontext.gregs[VKI_REG_CS] = VKI_UCS_SEL;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_CS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_DS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_DS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_SS] = VKI_UDS_SEL;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_SS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_ES] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ES], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_FS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FS], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_GS] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GS], sizeof(UWord));

   uc->uc_mcontext.gregs[VKI_REG_FSBASE] = tst->arch.vex.guest_FS_CONST;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_REG_GSBASE] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GSBASE], sizeof(UWord));

   /* Handle rflags.  Refer to the x86-solaris variant of this code for
      a detailed description. */
   uc->uc_mcontext.gregs[VKI_REG_RFL] =
      LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestAMD64State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestAMD64State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   VKI_UC_GUEST_RFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_REG_RFL];
   /* Calculate a checksum. */
   {
      ULong buf[5];
      ULong checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_REG_RFL];
      checksum = ML_(fletcher64)((UInt*)&buf, sizeof(buf) / sizeof(UInt));
      VKI_UC_GUEST_RFLAGS_CHECKSUM(uc) = checksum;
   }
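
   /* The negated copy and the checksum stored above let the restore path
      below decide whether the CC_* values in the ucontext are still the ones
      written here (full restoration of the flags thunk is then safe) or
      whether the client modified RFL, in which case only an approximate
      rflags value is put back. */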

   /* The fpregset_t structure on amd64 follows the layout that is used by the
      FXSAVE instruction, therefore it is only necessary to call a VEX
      function that simulates this instruction. */
   LibVEX_GuestAMD64_fxsave(&tst->arch.vex, (HWord)fs);

   /* Control word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->cw, sizeof(fs->cw));
   /* Status word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->sw, sizeof(fs->sw));
   /* Compressed tag word */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fctw, sizeof(fs->fctw));
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->__fx_rsvd,
            sizeof(fs->__fx_rsvd));
   vg_assert(fs->__fx_rsvd == 0);
   /* Last x87 opcode */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fop, sizeof(fs->fop));
   vg_assert(fs->fop == 0);
   /* Last x87 instruction pointer */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rip, sizeof(fs->rip));
   vg_assert(fs->rip == 0);
   /* Last x87 data pointer */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rdp, sizeof(fs->rdp));
   vg_assert(fs->rdp == 0);
   /* Media-instruction control and status register */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
   /* Supported features in MXCSR */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr_mask,
            sizeof(fs->mxcsr_mask));

   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->st[i];
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }
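
   /* For tools tracking register state, the two copy_reg_to_mem calls above
      cover each 80-bit st slot from the 8-byte VEX guest_FPREG shadow: the
      first handles the 64-bit part, the second reuses the same register
      origin for the 2 extra bytes at offset 8, since VEX keeps no separate
      state for them. */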

   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM0), (Addr)&fs->xmm[0], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM1), (Addr)&fs->xmm[1], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM2), (Addr)&fs->xmm[2], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM3), (Addr)&fs->xmm[3], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM4), (Addr)&fs->xmm[4], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM5), (Addr)&fs->xmm[5], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM6), (Addr)&fs->xmm[6], sizeof(U128));
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
            guest_YMM7), (Addr)&fs->xmm[7], sizeof(U128));

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));
}

/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_RIP = uc->uc_mcontext.gregs[VKI_REG_RIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], OFFSET_amd64_RIP,
               sizeof(UWord));
      tst->arch.vex.guest_RAX = uc->uc_mcontext.gregs[VKI_REG_RAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], OFFSET_amd64_RAX,
               sizeof(UWord));
      tst->arch.vex.guest_RBX = uc->uc_mcontext.gregs[VKI_REG_RBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], OFFSET_amd64_RBX,
               sizeof(UWord));
      tst->arch.vex.guest_RCX = uc->uc_mcontext.gregs[VKI_REG_RCX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], OFFSET_amd64_RCX,
               sizeof(UWord));
      tst->arch.vex.guest_RDX = uc->uc_mcontext.gregs[VKI_REG_RDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], OFFSET_amd64_RDX,
               sizeof(UWord));
      tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], OFFSET_amd64_RBP,
               sizeof(UWord));
      tst->arch.vex.guest_RSI = uc->uc_mcontext.gregs[VKI_REG_RSI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], OFFSET_amd64_RSI,
               sizeof(UWord));
      tst->arch.vex.guest_RDI = uc->uc_mcontext.gregs[VKI_REG_RDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], OFFSET_amd64_RDI,
               sizeof(UWord));
      tst->arch.vex.guest_R8 = uc->uc_mcontext.gregs[VKI_REG_R8];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], OFFSET_amd64_R8,
               sizeof(UWord));
      tst->arch.vex.guest_R9 = uc->uc_mcontext.gregs[VKI_REG_R9];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], OFFSET_amd64_R9,
               sizeof(UWord));
      tst->arch.vex.guest_R10 = uc->uc_mcontext.gregs[VKI_REG_R10];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], OFFSET_amd64_R10,
               sizeof(UWord));
      tst->arch.vex.guest_R11 = uc->uc_mcontext.gregs[VKI_REG_R11];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], OFFSET_amd64_R11,
               sizeof(UWord));
      tst->arch.vex.guest_R12 = uc->uc_mcontext.gregs[VKI_REG_R12];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], OFFSET_amd64_R12,
               sizeof(UWord));
      tst->arch.vex.guest_R13 = uc->uc_mcontext.gregs[VKI_REG_R13];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], OFFSET_amd64_R13,
               sizeof(UWord));
      tst->arch.vex.guest_R14 = uc->uc_mcontext.gregs[VKI_REG_R14];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], OFFSET_amd64_R14,
               sizeof(UWord));
      tst->arch.vex.guest_R15 = uc->uc_mcontext.gregs[VKI_REG_R15];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], OFFSET_amd64_R15,
               sizeof(UWord));
      tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], OFFSET_amd64_RSP,
               sizeof(UWord));

      /* Ignore ERR and TRAPNO. */

      /* Ignore segment registers. */

      tst->arch.vex.guest_FS_CONST = uc->uc_mcontext.gregs[VKI_REG_FSBASE];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE],
               offsetof(VexGuestAMD64State, guest_FS_CONST), sizeof(UWord));

      /* Rflags.  Refer to the x86-solaris variant of this code for a detailed
         description. */
      {
         ULong rflags;
         ULong orig_rflags;
         ULong new_rflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_REG_RFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
         rflags = uc->uc_mcontext.gregs[VKI_REG_RFL];
         orig_rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
         new_rflags = rflags;
         /* The kernel disallows the ID flag to be changed via the setcontext
            call, thus do the same. */
         if (orig_rflags & VKI_RFLAGS_ID_BIT)
            new_rflags |= VKI_RFLAGS_ID_BIT;
         else
            new_rflags &= ~VKI_RFLAGS_ID_BIT;
         LibVEX_GuestAMD64_put_rflags(new_rflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestAMD64State, guest_CC_DEP2), sizeof(UWord));

         if (rflags != ~VKI_UC_GUEST_RFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                          "The rflags value was restored from an "
                          "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            ULong buf[4];
            ULong checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            checksum = ML_(fletcher64)((UInt*)&buf,
                                       sizeof(buf) / sizeof(UInt));
            if (checksum == VKI_UC_GUEST_RFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                             "The CC_* guest state values were fully "
                             "restored in thread %u.\n", tid);

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestAMD64State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestAMD64State, guest_CC_DEP2),
                        sizeof(UWord));

               ok_restore = True;
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                          "Cannot fully restore the CC_* guest state "
                          "values, using approximate rflags in thread "
                          "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      VexEmNote note;
      SizeT i;

      /* Control word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..cw)",
               (Addr)&fs->cw, sizeof(fs->cw));
      /* Status word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..sw)",
               (Addr)&fs->sw, sizeof(fs->sw));
      /* Compressed tag word */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..fctw)",
               (Addr)&fs->fctw, sizeof(fs->fctw));
      /* Last x87 opcode */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..fop)",
               (Addr)&fs->fop, sizeof(fs->fop));
      /* Last x87 instruction pointer */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..rip)",
               (Addr)&fs->rip, sizeof(fs->rip));
      /* Last x87 data pointer */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..rdp)",
               (Addr)&fs->rdp, sizeof(fs->rdp));
      /* Media-instruction control and status register */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      /* Supported features in MXCSR */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr_mask)",
               (Addr)&fs->mxcsr_mask, sizeof(fs->mxcsr_mask));

      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->st[i];
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestAMD64State, guest_FPREG[i]), sizeof(ULong));
      }

      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestAMD64State, guest_YMM0), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestAMD64State, guest_YMM1), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestAMD64State, guest_YMM2), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestAMD64State, guest_YMM3), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestAMD64State, guest_YMM4), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestAMD64State, guest_YMM5), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestAMD64State, guest_YMM6), sizeof(U128));
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestAMD64State, guest_YMM7), sizeof(U128));

      note = LibVEX_GuestAMD64_fxrstor((HWord)fs, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring FP state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));
   }
}

/* ---------------------------------------------------------------------
   PRE/POST wrappers for AMD64/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(amd64_solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(amd64_solaris, name)
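
/* Illustrative only: a wrapper defined with the templates above would look
   roughly like the hypothetical example below; the platform's actual wrapper
   definitions are not reproduced here.

      PRE(sys_example)
      {
         PRINT("sys_example ( %ld )", SARG1);
         PRE_REG_READ1(long, "example", long, arg1);
      }
*/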

#endif // defined(VGP_amd64_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/