/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu
      setup@dagobah.cz

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_solaris)
#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"           // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"
/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* 4(%esp) */
                                Addr retaddr,           /* 8(%esp) */
                                void (*f)(Word),        /* 12(%esp) */
                                Word arg1);             /* 16(%esp) */
__asm__ (
".text\n"
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl  %esp, %esi\n"         /* remember old stack pointer */
"   movl  4(%esi), %esp\n"      /* set stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl 16(%esi)\n"           /* arg1 to stack */
"   pushl 8(%esi)\n"            /* retaddr to stack */
"   pushl 12(%esi)\n"           /* f to stack */
"   movl  $0, %eax\n"           /* zero all GP regs */
"   movl  $0, %ebx\n"
"   movl  $0, %ecx\n"
"   movl  $0, %edx\n"
"   movl  $0, %esi\n"
"   movl  $0, %edi\n"
"   movl  $0, %ebp\n"
"   ret\n"                      /* jump to f */
"   ud2\n"                      /* should never get here */
".previous\n"
);

/* This function is called to setup a context of a new Valgrind thread (which
   will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
   UShort cs, ds, ss, es, fs, gs;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack, it should be always 16-byte aligned before doing
      a function call, i.e. the first parameter is also 16-byte aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack -= 1;
   stack[0] = 0; /* bogus return value */
   stack[1] = (UWord)tst; /* the parameter */
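
   /* The frame now looks as if ML_(start_thread_NORETURN) had just been
      called: ESP (set below) points at the bogus return slot, with 'tst' in
      the first-argument position directly above it. */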

   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_EIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_UESP] = (UWord)stack;

   /* Copy segment registers. */
   __asm__ __volatile__(
      "movw %%cs, %[cs]\n"
      "movw %%ds, %[ds]\n"
      "movw %%ss, %[ss]\n"
      "movw %%es, %[es]\n"
      "movw %%fs, %[fs]\n"
      "movw %%gs, %[gs]\n"
      : [cs] "=m" (cs), [ds] "=m" (ds), [ss] "=m" (ss), [es] "=m" (es),
        [fs] "=m" (fs), [gs] "=m" (gs));
   uc->uc_mcontext.gregs[VKI_CS] = cs;
   uc->uc_mcontext.gregs[VKI_DS] = ds;
   uc->uc_mcontext.gregs[VKI_SS] = ss;
   uc->uc_mcontext.gregs[VKI_ES] = es;
   uc->uc_mcontext.gregs[VKI_FS] = fs;
   uc->uc_mcontext.gregs[VKI_GS] = gs;
}

/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_EIP] = tst->arch.vex.guest_EIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EAX] = tst->arch.vex.guest_EAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBX] = tst->arch.vex.guest_EBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ECX] = tst->arch.vex.guest_ECX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ECX,
            (Addr)&uc->uc_mcontext.gregs[VKI_ECX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDX] = tst->arch.vex.guest_EDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBP] = tst->arch.vex.guest_EBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESI] = tst->arch.vex.guest_ESI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESI,
            (Addr)&uc->uc_mcontext.gregs[VKI_ESI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDI] = tst->arch.vex.guest_EDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_UESP] = tst->arch.vex.guest_ESP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESP,
            (Addr)&uc->uc_mcontext.gregs[VKI_UESP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESP] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ESP],
            sizeof(UWord));

   /* ERR and TRAPNO */
   uc->uc_mcontext.gregs[VKI_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ERR],
            sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_TRAPNO], sizeof(UWord));

   /* Segment registers */
   /* Note that segment registers are 16b in VEX, but 32b in mcontext.  Thus
      we tell a tool that the lower 16 bits were copied and that the higher 16
      bits were set (to zero).  (This assumes a little-endian
      architecture.) */
   uc->uc_mcontext.gregs[VKI_CS] = tst->arch.vex.guest_CS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_CS,
            (Addr)&uc->uc_mcontext.gregs[VKI_CS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_CS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_DS] = tst->arch.vex.guest_DS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_DS,
            (Addr)&uc->uc_mcontext.gregs[VKI_DS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_DS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_SS] = tst->arch.vex.guest_SS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_SS,
            (Addr)&uc->uc_mcontext.gregs[VKI_SS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_SS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_ES] = tst->arch.vex.guest_ES;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ES,
            (Addr)&uc->uc_mcontext.gregs[VKI_ES], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_ES]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_FS] = tst->arch.vex.guest_FS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_FS,
            (Addr)&uc->uc_mcontext.gregs[VKI_FS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_FS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_GS] = tst->arch.vex.guest_GS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_GS,
            (Addr)&uc->uc_mcontext.gregs[VKI_GS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_GS]) + 2, sizeof(UShort));

   /* Handle eflags (optimistically make all flags defined). */
   uc->uc_mcontext.gregs[VKI_EFL] =
      LibVEX_GuestX86_get_eflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_EFL],
            sizeof(UWord));
   /* The LibVEX_GuestX86_get_eflags() call calculates the eflags value from
      the CC_OP, CC_DEP1, CC_DEP2, CC_NDEP, DFLAG, IDFLAG and ACFLAG guest
      state values.  The *FLAG values represent one-bit information and are
      saved without loss of precision into eflags.  However when CC_* values
      are converted into eflags then precision is lost.  What we do here is
      to save unmodified CC_* values into unused ucontext members (the 'long
      uc_filler[5]' and 'int fs->__pad[2]' arrays) so we can then restore the
      context in ML_(restore_machine_context)() without the loss of
      precision.  This imposes a requirement on client programs to not use
      these two members.  Luckily this is never the case in Solaris-gate
      programs and libraries. */
   /* CC_OP and CC_NDEP are always defined, but we don't want to tell a tool
      that we just defined uc_filler[0,1].  This helps if someone uses an
      uninitialized ucontext and tries to read (use) uc_filler[0,1].  Memcheck
      in such a case should detect this error. */
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   /* We want to copy shadow values of CC_DEP1 and CC_DEP2 so we have to tell
      a tool about this copy. */
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   /* Make another copy of eflags. */
   VKI_UC_GUEST_EFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_EFL];
   /* Calculate a checksum. */
   {
      UInt buf[5];
      UInt checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_EFL];
      checksum = ML_(fletcher32)((UShort*)&buf, sizeof(buf) / sizeof(UShort));
      /* Store the checksum. */
      VKI_UC_GUEST_EFLAGS_CHECKSUM(uc) = checksum;
   }
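
   /* ML_(restore_machine_context)() uses the two stashed values above to
      decide what to trust: if the negated eflags copy no longer matches,
      the client explicitly changed eflags; if it matches and the
      Fletcher-32 checksum over the same five words also matches, the
      stashed CC_* values are restored verbatim. */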

   /* FPU */
   /* x87 */
   vg_assert(sizeof(fs->state) == 108);
   LibVEX_GuestX86_get_x87(&tst->arch.vex, (UChar*)&fs->state);

   /* Flags and control words */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->state, 28);
   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->state + 28 + i * 10;
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* SSE */
   fs->mxcsr = LibVEX_GuestX86_get_mxcsr(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));

   /* XMM registers */
#define COPY_OUT_XMM(dest, src) \
   do {                         \
      dest._l[0] = src[0];      \
      dest._l[1] = src[1];      \
      dest._l[2] = src[2];      \
      dest._l[3] = src[3];      \
   } while (0)
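   /* The mcontext stores each XMM register as four 32-bit lanes (_l[0..3])
      whereas VEX keeps them as U128 values, hence the lane-by-lane copy. */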
   COPY_OUT_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM0), (Addr)&fs->xmm[0], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM1), (Addr)&fs->xmm[1], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM2), (Addr)&fs->xmm[2], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM3), (Addr)&fs->xmm[3], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM4), (Addr)&fs->xmm[4], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM5), (Addr)&fs->xmm[5], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM6), (Addr)&fs->xmm[6], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM7), (Addr)&fs->xmm[7], sizeof(U128));
#undef COPY_OUT_XMM
}

/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_EIP = uc->uc_mcontext.gregs[VKI_EIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EIP], OFFSET_x86_EIP,
               sizeof(UWord));
      tst->arch.vex.guest_EAX = uc->uc_mcontext.gregs[VKI_EAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EAX], OFFSET_x86_EAX,
               sizeof(UWord));
      tst->arch.vex.guest_EBX = uc->uc_mcontext.gregs[VKI_EBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBX], OFFSET_x86_EBX,
               sizeof(UWord));
      tst->arch.vex.guest_ECX = uc->uc_mcontext.gregs[VKI_ECX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ECX], OFFSET_x86_ECX,
               sizeof(UWord));
      tst->arch.vex.guest_EDX = uc->uc_mcontext.gregs[VKI_EDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDX], OFFSET_x86_EDX,
               sizeof(UWord));
      tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBP], OFFSET_x86_EBP,
               sizeof(UWord));
      tst->arch.vex.guest_ESI = uc->uc_mcontext.gregs[VKI_ESI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ESI], OFFSET_x86_ESI,
               sizeof(UWord));
      tst->arch.vex.guest_EDI = uc->uc_mcontext.gregs[VKI_EDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDI], OFFSET_x86_EDI,
               sizeof(UWord));
      tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_UESP], OFFSET_x86_ESP,
               sizeof(UWord));

      if (esp_is_thrptr) {
         /* The thrptr value is passed by libc to the kernel in the otherwise
            unused ESP field.  This is used when a new thread is created. */
         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_ESP])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_ESP], sizeof(UWord));
         if (uc->uc_mcontext.gregs[VKI_ESP]) {
            tst->os_state.thrptr = uc->uc_mcontext.gregs[VKI_ESP];
            ML_(update_gdt_lwpgs)(tid);
         }
      }

      /* Ignore ERR and TRAPNO. */

      /* Segment registers */
      tst->arch.vex.guest_CS = uc->uc_mcontext.gregs[VKI_CS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_CS], OFFSET_x86_CS,
               sizeof(UShort));
      tst->arch.vex.guest_DS = uc->uc_mcontext.gregs[VKI_DS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_DS], OFFSET_x86_DS,
               sizeof(UShort));
      tst->arch.vex.guest_SS = uc->uc_mcontext.gregs[VKI_SS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_SS], OFFSET_x86_SS,
               sizeof(UShort));
      tst->arch.vex.guest_ES = uc->uc_mcontext.gregs[VKI_ES];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ES], OFFSET_x86_ES,
               sizeof(UShort));
      tst->arch.vex.guest_FS = uc->uc_mcontext.gregs[VKI_FS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_FS], OFFSET_x86_FS,
               sizeof(UShort));
      tst->arch.vex.guest_GS = uc->uc_mcontext.gregs[VKI_GS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_GS], OFFSET_x86_GS,
               sizeof(UShort));

      /* Eflags */
      {
         UInt eflags;
         UInt orig_eflags;
         UInt new_eflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_EFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_EFL], sizeof(UWord));
         eflags = uc->uc_mcontext.gregs[VKI_EFL];
         orig_eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
         new_eflags = eflags;
         /* The kernel disallows the ID flag to be changed via the setcontext
            call, thus do the same. */
         if (orig_eflags & VKI_EFLAGS_ID_BIT)
            new_eflags |= VKI_EFLAGS_ID_BIT;
         else
            new_eflags &= ~VKI_EFLAGS_ID_BIT;
         LibVEX_GuestX86_put_eflags(new_eflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP2), sizeof(UWord));

         /* Check if this context was created by us in VG_(save_context).  In
            that case, try to restore the CC_OP, CC_DEP1, CC_DEP2 and CC_NDEP
            values which we previously stashed into unused members of the
            context. */
         if (eflags != ~VKI_UC_GUEST_EFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                          "The eflags value was restored from an "
                          "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            UInt buf[5];
            UInt checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            buf[4] = eflags;
            checksum = ML_(fletcher32)((UShort*)&buf,
                                       sizeof(buf) / sizeof(UShort));
            if (checksum == VKI_UC_GUEST_EFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                             "The CC_* guest state values were fully "
                             "restored in thread %u.\n", tid);
               ok_restore = True;

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP2),
                        sizeof(UWord));
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                          "Cannot fully restore the CC_* guest state "
                          "values, using approximate eflags in thread "
                          "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      /* FPU */
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Flags and control words */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..x87_state)",
               (Addr)&fs->state, 28);
      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->state + 28 + i * 10;
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestX86State, guest_FPREG[i]), sizeof(ULong));
      }
      note = LibVEX_GuestX86_put_x87((UChar*)&fs->state, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring x87 state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* SSE */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      note = LibVEX_GuestX86_put_mxcsr(fs->mxcsr, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring mxcsr state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));
      /* XMM registers */
#define COPY_IN_XMM(src, dest) \
   do {                        \
      dest[0] = src._l[0];     \
      dest[1] = src._l[1];     \
      dest[2] = src._l[2];     \
      dest[3] = src._l[3];     \
   } while (0)
      COPY_IN_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestX86State, guest_XMM0), sizeof(U128));
      COPY_IN_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestX86State, guest_XMM1), sizeof(U128));
      COPY_IN_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestX86State, guest_XMM2), sizeof(U128));
      COPY_IN_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestX86State, guest_XMM3), sizeof(U128));
      COPY_IN_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestX86State, guest_XMM4), sizeof(U128));
      COPY_IN_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestX86State, guest_XMM5), sizeof(U128));
      COPY_IN_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestX86State, guest_XMM6), sizeof(U128));
      COPY_IN_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestX86State, guest_XMM7), sizeof(U128));
#undef COPY_IN_XMM
   }
}

/* Allocate GDT for a given thread. */
void ML_(setup_gdt)(VexGuestX86State *vex)
{
   Addr gdt = (Addr)VG_(calloc)("syswrap-solaris-x86.gdt",
                                VEX_GUEST_X86_GDT_NENT,
                                sizeof(VexGuestX86SegDescr));
   vex->guest_GDT = gdt;
}

/* Deallocate GDT for a given thread. */
void ML_(cleanup_gdt)(VexGuestX86State *vex)
{
   if (!vex->guest_GDT)
      return;
   VG_(free)((void *) (HWord) vex->guest_GDT);
   vex->guest_GDT = 0;
}

/* For a given thread, update the LWPGS descriptor in the thread's GDT
   according to the thread pointer. */
void ML_(update_gdt_lwpgs)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr base = tst->os_state.thrptr;
   VexGuestX86SegDescr *gdt
      = (VexGuestX86SegDescr *) (HWord) tst->arch.vex.guest_GDT;
   VexGuestX86SegDescr desc;

   vg_assert(gdt);

   VG_(memset)(&desc, 0, sizeof(desc));
   if (base) {
      desc.LdtEnt.Bits.LimitLow = -1;
      desc.LdtEnt.Bits.LimitHi = -1;
      desc.LdtEnt.Bits.BaseLow = base & 0xffff;
      desc.LdtEnt.Bits.BaseMid = (base >> 16) & 0xff;
      desc.LdtEnt.Bits.BaseHi = (base >> 24) & 0xff;
      desc.LdtEnt.Bits.Pres = 1;
      desc.LdtEnt.Bits.Dpl = 3; /* SEL_UPL */
      desc.LdtEnt.Bits.Type = 19; /* SDT_MEMRWA */
      desc.LdtEnt.Bits.Granularity = 1; /* SDP_PAGES */
      desc.LdtEnt.Bits.Default_Big = 1; /* SDP_OP32 */
   }
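
   /* The descriptor built above is a present, ring-3 (SEL_UPL), read/write
      data segment with page granularity and the maximum 0xfffff-page limit,
      i.e. a flat 4GB segment based at thrptr. */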

   gdt[VKI_GDT_LWPGS] = desc;

   /* Write %gs. */
   tst->arch.vex.guest_GS = VKI_LWPGS_SEL;
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_GS,
            sizeof(UShort));
}

/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)       DEFN_PRE_TEMPLATE(x86_solaris, name)
#define POST(name)      DEFN_POST_TEMPLATE(x86_solaris, name)

/* implementation */

PRE(sys_fstatat64)
{
   /* int fstatat64(int fildes, const char *path, struct stat64 *buf,
        int flag); */
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %ld )", SARG1, ARG2,
         (HChar*)ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "fstatat64", int, fildes, const char *, path,
                 struct stat64 *, buf, int, flag);
   if (ARG2)
      PRE_MEM_RASCIIZ("fstatat64(path)", ARG2);
   PRE_MEM_WRITE("fstatat64(buf)", ARG3, sizeof(struct vki_stat64));

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD &&
       !ML_(fd_allowed)(ARG1, "fstatat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat64));
}

PRE(sys_openat64)
{
   /* int openat64(int fildes, const char *filename, int flags);
      int openat64(int fildes, const char *filename, int flags, mode_t mode);
    */
   *flags |= SfMayBlock;

   if (ARG3 & VKI_O_CREAT) {
      /* 4-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld, %ld )", SARG1, ARG2,
            (HChar*)ARG2, SARG3, SARG4);
      PRE_REG_READ4(long, "openat64", int, fildes, const char *, filename,
                    int, flags, vki_mode_t, mode);
   }
   else {
      /* 3-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld )", SARG1, ARG2, (HChar*)ARG2,
            SARG3);
      PRE_REG_READ3(long, "openat64", int, fildes, const char *, filename,
                    int, flags);
   }

   PRE_MEM_RASCIIZ("openat64(filename)", ARG2);

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_openat64)
{
   if (!ML_(fd_allowed)(RES, "openat64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
}

PRE(sys_llseek32)
{
   /* offset_t llseek(int fildes, offset_t offset, int whence); */
   PRINT("sys_llseek32 ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "llseek", int, fildes, vki_u32, offset_low,
                 vki_u32, offset_high, int, whence);

   /* Stay sane. */
   if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_mmap64)
{
   /* void *mmap64(void *addr, size_t len, int prot, int flags,
        int fildes, uint32_t offlo, uint32_t offhi); */
   /* Note this wrapper assumes a little-endian architecture, offlo and offhi
      have to be swapped if a big-endian architecture is present. */
#if !defined(VG_LITTLEENDIAN)
#error "Unexpected endianness."
#endif /* !VG_LITTLEENDIAN */

   SysRes r;
   ULong u;
   Off64T offset;

   /* Stay sane. */
   vg_assert(VKI_PAGE_SIZE == 4096);
   vg_assert(sizeof(u) == sizeof(offset));

   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4, SARG5, ARG6, ARG7);
   PRE_REG_READ7(long, "mmap", void *, start, vki_size_t, length,
                 int, prot, int, flags, int, fd, uint32_t, offlo,
                 uint32_t, offhi);

   /* The offlo and offhi values can actually represent a negative value.
      Make sure it's passed correctly to the generic mmap wrapper. */
   u = ((ULong)ARG7 << 32) + ARG6;
   offset = *(Off64T*)&u;

   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
   SET_STATUS_from_SysRes(r);
}

PRE(sys_stat64)
{
   /* int stat64(const char *path, struct stat64 *buf); */
   PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "stat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
   PRE_MEM_WRITE("stat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_stat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_lstat64)
{
   /* int lstat64(const char *path, struct stat64 *buf); */
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "lstat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
   PRE_MEM_WRITE("lstat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_lstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_fstat64)
{
   /* int fstat64(int fildes, struct stat64 *buf); */
   PRINT("sys_fstat64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstat64", int, fildes, struct stat64 *, buf);
   PRE_MEM_WRITE("fstat64(buf)", ARG2, sizeof(struct vki_stat64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

static void do_statvfs64_post(struct vki_statvfs64 *stats, ThreadId tid)
{
   POST_FIELD_WRITE(stats->f_bsize);
   POST_FIELD_WRITE(stats->f_frsize);
   POST_FIELD_WRITE(stats->f_blocks);
   POST_FIELD_WRITE(stats->f_bfree);
   POST_FIELD_WRITE(stats->f_bavail);
   POST_FIELD_WRITE(stats->f_files);
   POST_FIELD_WRITE(stats->f_ffree);
   POST_FIELD_WRITE(stats->f_favail);
   POST_FIELD_WRITE(stats->f_fsid);
   POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
   POST_FIELD_WRITE(stats->f_flag);
   POST_FIELD_WRITE(stats->f_namemax);
   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
}

PRE(sys_statvfs64)
{
   /* int statvfs64(const char *path, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_statvfs64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "statvfs64", const char *, path,
                 struct vki_statvfs64 *, buf);
   PRE_MEM_RASCIIZ("statvfs64(path)", ARG1);
   PRE_MEM_WRITE("statvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
}

POST(sys_statvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_fstatvfs64)
{
   /* int fstatvfs64(int fd, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_fstatvfs64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstatvfs64", int, fd, struct vki_statvfs64 *, buf);
   PRE_MEM_WRITE("fstatvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstatvfs64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_setrlimit64)
{
   /* int setrlimit64(int resource, struct rlimit64 *rlim); */
   struct vki_rlimit64 *limit = (struct vki_rlimit64 *)ARG2;
   PRINT("sys_setrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "setrlimit64", int, resource, struct rlimit64 *, rlim);
   PRE_MEM_READ("setrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));

   if (limit && limit->rlim_cur > limit->rlim_max)
      SET_STATUS_Failure(VKI_EINVAL);
   else if (ARG1 == VKI_RLIMIT_NOFILE) {
      if (limit->rlim_cur > VG_(fd_hard_limit) ||
          limit->rlim_max != VG_(fd_hard_limit)) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(fd_soft_limit) = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_DATA) {
      if (limit->rlim_cur > VG_(client_rlimit_data).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_data).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(client_rlimit_data).rlim_max = limit->rlim_max;
         VG_(client_rlimit_data).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_STACK && tid == 1) {
      if (limit->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_stack).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         /* Change the value of client_stack_szB to the rlim_cur value but
            only if it is smaller than the size of the allocated stack for the
            client. */
         if (limit->rlim_cur <= VG_(clstk_max_size))
            VG_(threads)[tid].client_stack_szB = limit->rlim_cur;

         VG_(client_rlimit_stack).rlim_max = limit->rlim_max;
         VG_(client_rlimit_stack).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
}

PRE(sys_getrlimit64)
{
   /* int getrlimit64(int resource, struct rlimit64 *rlim); */
   PRINT("sys_getrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getrlimit64",
                 int, resource, struct rlimit64 *, rlim);
   PRE_MEM_WRITE("getrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
}

POST(sys_getrlimit64)
{
   /* Based on common_post_getrlimit() from syswrap-generic.c. */
   struct vki_rlimit64 *rlim = (struct vki_rlimit64*)ARG2;

   POST_MEM_WRITE(ARG2, sizeof(struct vki_rlimit64));

   switch (ARG1 /*resource*/) {
   case VKI_RLIMIT_NOFILE:
      rlim->rlim_cur = VG_(fd_soft_limit);
      rlim->rlim_max = VG_(fd_hard_limit);
      break;
   case VKI_RLIMIT_DATA:
      rlim->rlim_cur = VG_(client_rlimit_data).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_data).rlim_max;
      break;
   case VKI_RLIMIT_STACK:
      rlim->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_stack).rlim_max;
      break;
   }
}

PRE(sys_pread64)
{
   /* ssize32_t pread64(int fd, void *buf, size32_t count,
        uint32_t offset_1, uint32_t offset_2);
    */
   *flags |= SfMayBlock;
   PRINT("sys_pread64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pread64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_WRITE("pread64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pread64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_pread64)
{
   POST_MEM_WRITE(ARG2, RES);
}

PRE(sys_pwrite64)
{
   /* ssize32_t pwrite64(int fd, void *buf, size32_t count,
        uint32_t offset_1, uint32_t offset_2);
    */
   *flags |= SfMayBlock;
   PRINT("sys_pwrite64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pwrite64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_READ("pwrite64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pwrite64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_open64)
{
   /* int open64(const char *filename, int flags);
      int open64(const char *filename, int flags, mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG2 & VKI_O_CREAT) {
      /* 3-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx, %ld )", ARG1, (HChar*)ARG1, ARG2,
            SARG3);
      PRE_REG_READ3(long, "open64", const char *, filename, int, flags,
                    vki_mode_t, mode);
   }
   else {
      /* 2-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
      PRE_REG_READ2(long, "open64", const char *, filename, int, flags);
   }
   PRE_MEM_RASCIIZ("open(filename)", ARG1);
}

POST(sys_open64)
{
   if (!ML_(fd_allowed)(RES, "open64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
}

#undef PRE
#undef POST

#endif // defined(VGP_x86_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/