/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.
*/

#if defined(VGP_x86_solaris)
#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"           // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"
/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* 4(%esp) */
                                Addr retaddr,           /* 8(%esp) */
                                void (*f)(Word),        /* 12(%esp) */
                                Word arg1);             /* 16(%esp) */
__asm__ (
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl  %esp, %esi\n"         /* remember old stack pointer */
"   movl  4(%esi), %esp\n"      /* set stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl 16(%esi)\n"           /* arg1 to stack */
"   pushl 8(%esi)\n"            /* retaddr to stack */
"   pushl 12(%esi)\n"           /* f to stack */
"   movl  $0, %eax\n"           /* zero all GP regs */
"   movl  $0, %ebx\n"
"   movl  $0, %ecx\n"
"   movl  $0, %edx\n"
"   movl  $0, %esi\n"
"   movl  $0, %edi\n"
"   movl  $0, %ebp\n"
"   ret\n"                      /* jump to f */
"   ud2\n"                      /* should never get here */
);
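/* Illustrative use (a hypothetical sketch, not code from this file): a
   caller abandons the old stack and never returns, e.g.

      ML_(call_on_new_stack_0_1)((Addr)new_stack_top,  // becomes %esp
                                 0,                    // bogus return address
                                 run_thread,           // void run_thread(Word)
                                 (Word)arg);           // single argument

   'new_stack_top', 'run_thread' and 'arg' are made-up names; since f never
   returns, the bogus return address is never popped. */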
/* This function is called to setup a context of a new Valgrind thread (which
   will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
   UShort cs, ds, ss, es, fs, gs;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack, it should be always 16-byte aligned before doing
      a function call, i.e. the first parameter is also 16-byte aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack -= 1;
   stack[0] = 0; /* bogus return value */
   stack[1] = (UWord)tst; /* the parameter */
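   /* Resulting layout (a sketch inferred from the code above): after the
      decrement, 'stack' points at the fake return slot and the parameter
      lands on the 16-byte boundary:

         stack[1] -> (UWord)tst   first argument, 16-byte aligned
         stack[0] -> 0            bogus return address, never used

      so ML_(start_thread_NORETURN) finds 'tst' at 4(%esp) on entry. */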
   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_EIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_UESP] = (UWord)stack;

   /* Copy segment registers. */
   __asm__ __volatile__(
      "movw %%cs, %[cs]\n"
      "movw %%ds, %[ds]\n"
      "movw %%ss, %[ss]\n"
      "movw %%es, %[es]\n"
      "movw %%fs, %[fs]\n"
      "movw %%gs, %[gs]\n"
      : [cs] "=m" (cs), [ds] "=m" (ds), [ss] "=m" (ss), [es] "=m" (es),
        [fs] "=m" (fs), [gs] "=m" (gs));
   uc->uc_mcontext.gregs[VKI_CS] = cs;
   uc->uc_mcontext.gregs[VKI_DS] = ds;
   uc->uc_mcontext.gregs[VKI_SS] = ss;
   uc->uc_mcontext.gregs[VKI_ES] = es;
   uc->uc_mcontext.gregs[VKI_FS] = fs;
   uc->uc_mcontext.gregs[VKI_GS] = gs;
}
/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_EIP] = tst->arch.vex.guest_EIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EAX] = tst->arch.vex.guest_EAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBX] = tst->arch.vex.guest_EBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ECX] = tst->arch.vex.guest_ECX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ECX,
            (Addr)&uc->uc_mcontext.gregs[VKI_ECX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDX] = tst->arch.vex.guest_EDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBP] = tst->arch.vex.guest_EBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESI] = tst->arch.vex.guest_ESI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESI,
            (Addr)&uc->uc_mcontext.gregs[VKI_ESI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDI] = tst->arch.vex.guest_EDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_UESP] = tst->arch.vex.guest_ESP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESP,
            (Addr)&uc->uc_mcontext.gregs[VKI_UESP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESP] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ESP],
            sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ERR],
            sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid,
            (Addr)&uc->uc_mcontext.gregs[VKI_TRAPNO], sizeof(UWord));
   /* Segment registers */
   /* Note that segment registers are 16b in VEX, but 32b in mcontext.  Thus
      we tell a tool that the lower 16 bits were copied and that the higher 16
      bits were set (to zero).  (This assumes a little-endian
      architecture.) */
   uc->uc_mcontext.gregs[VKI_CS] = tst->arch.vex.guest_CS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_CS,
            (Addr)&uc->uc_mcontext.gregs[VKI_CS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_CS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_DS] = tst->arch.vex.guest_DS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_DS,
            (Addr)&uc->uc_mcontext.gregs[VKI_DS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_DS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_SS] = tst->arch.vex.guest_SS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_SS,
            (Addr)&uc->uc_mcontext.gregs[VKI_SS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_SS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_ES] = tst->arch.vex.guest_ES;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ES,
            (Addr)&uc->uc_mcontext.gregs[VKI_ES], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_ES]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_FS] = tst->arch.vex.guest_FS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_FS,
            (Addr)&uc->uc_mcontext.gregs[VKI_FS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_FS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_GS] = tst->arch.vex.guest_GS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_GS,
            (Addr)&uc->uc_mcontext.gregs[VKI_GS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_GS]) + 2, sizeof(UShort));
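   /* To make the 16b/32b split concrete (a sketch assuming the little-endian
      layout noted above): a 32-bit greg slot holding a segment selector ends
      up as

         bytes 0-1: the 16-bit selector, tracked as copied from the guest
         bytes 2-3: zero, tracked only as defined memory

      which is why each selector gets one copy_reg_to_mem call and one
      post_mem_write call two bytes in. */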
   /* Handle eflags (optimistically make all flags defined). */
   uc->uc_mcontext.gregs[VKI_EFL] =
      LibVEX_GuestX86_get_eflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_EFL],
            sizeof(UWord));
   /* The LibVEX_GuestX86_get_eflags() call calculates eflags value from the
      CC_OP, CC_DEP1, CC_DEP2, CC_NDEP, DFLAG, IDFLAG and ACFLAG guest state
      values.  The *FLAG values represent one-bit information and are saved
      without loss of precision into eflags.  However when CC_* values are
      converted into eflags then precision is lost.  What we do here is to
      save unmodified CC_* values into unused ucontext members (the 'long
      uc_filler[5]' and 'int fs->__pad[2]' arrays) so we can then restore the
      context in ML_(restore_machine_context)() without the loss of precision.
      This imposes a requirement on client programs to not use these two
      members.  Luckily this is never a case in Solaris-gate programs and
      libraries. */
   /* CC_OP and CC_NDEP are always defined, but we don't want to tell a tool
      that we just defined uc_filler[0,1].  This helps if someone uses an
      uninitialized ucontext and tries to read (use) uc_filler[0,1].  Memcheck
      in such a case should detect this error. */
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   /* We want to copy shadow values of CC_DEP1 and CC_DEP2 so we have to tell
      a tool about this copy. */
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   /* Make another copy of eflags. */
   VKI_UC_GUEST_EFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_EFL];
   /* Calculate a checksum. */
   {
      UInt buf[5];
      UInt checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_EFL];
      checksum = ML_(fletcher32)((UShort*)&buf, sizeof(buf) / sizeof(UShort));
      /* Store the checksum. */
      VKI_UC_GUEST_EFLAGS_CHECKSUM(uc) = checksum;
   }
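   /* For reference, a minimal sketch of the Fletcher-32 scheme that
      ML_(fletcher32) implements (sketch only; the real definition lives in
      syswrap-solaris.c):

         UInt sum1 = 0, sum2 = 0;
         for (i = 0; i < words; i++) {
            sum1 = (sum1 + data[i]) % 0xffff;   // running sum of words
            sum2 = (sum2 + sum1) % 0xffff;      // running sum of sums
         }
         return (sum2 << 16) | sum1;

      A mismatch of this value in ML_(restore_machine_context)() indicates
      that the stashed CC_* members were clobbered by the client. */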
   /* x87 */
   vg_assert(sizeof(fs->state) == 108);
   LibVEX_GuestX86_get_x87(&tst->arch.vex, (UChar*)&fs->state);

   /* Flags and control words */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->state, 28);
   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->state + 28 + i * 10;
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }
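   /* Layout assumed by the loop above (the 108-byte FSAVE image): bytes
      0..27 hold the x87 environment (control, status and tag words plus
      instruction/data pointers), and each ST register occupies 10 bytes
      from offset 28 + i * 10:

         bytes 0..7: 64-bit significand  <- copied from guest_FPREG[i]
         bytes 8..9: sign and exponent   <- only claimed as copied (the lie)

      since VEX stores just 64-bit doubles for the FP registers. */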
   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* SSE */
   fs->mxcsr = LibVEX_GuestX86_get_mxcsr(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));

   /* XMM registers */
#define COPY_OUT_XMM(dest, src) \
   do {                         \
      dest._l[0] = src[0];      \
      dest._l[1] = src[1];      \
      dest._l[2] = src[2];      \
      dest._l[3] = src[3];      \
   } while (0)
   COPY_OUT_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM0), (Addr)&fs->xmm[0], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM1), (Addr)&fs->xmm[1], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM2), (Addr)&fs->xmm[2], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM3), (Addr)&fs->xmm[3], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM4), (Addr)&fs->xmm[4], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM5), (Addr)&fs->xmm[5], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM6), (Addr)&fs->xmm[6], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM7), (Addr)&fs->xmm[7], sizeof(U128));
#undef COPY_OUT_XMM
}
/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_EIP = uc->uc_mcontext.gregs[VKI_EIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EIP], OFFSET_x86_EIP,
               sizeof(UWord));
      tst->arch.vex.guest_EAX = uc->uc_mcontext.gregs[VKI_EAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EAX], OFFSET_x86_EAX,
               sizeof(UWord));
      tst->arch.vex.guest_EBX = uc->uc_mcontext.gregs[VKI_EBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBX], OFFSET_x86_EBX,
               sizeof(UWord));
      tst->arch.vex.guest_ECX = uc->uc_mcontext.gregs[VKI_ECX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ECX], OFFSET_x86_ECX,
               sizeof(UWord));
      tst->arch.vex.guest_EDX = uc->uc_mcontext.gregs[VKI_EDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDX], OFFSET_x86_EDX,
               sizeof(UWord));
      tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBP], OFFSET_x86_EBP,
               sizeof(UWord));
      tst->arch.vex.guest_ESI = uc->uc_mcontext.gregs[VKI_ESI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ESI], OFFSET_x86_ESI,
               sizeof(UWord));
      tst->arch.vex.guest_EDI = uc->uc_mcontext.gregs[VKI_EDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDI], OFFSET_x86_EDI,
               sizeof(UWord));
      tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_UESP], OFFSET_x86_ESP,
               sizeof(UWord));

      if (esp_is_thrptr) {
         /* The thrptr value is passed by libc to the kernel in the otherwise
            unused ESP field.  This is used when a new thread is created. */
         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_ESP])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_ESP], sizeof(UWord));
         if (uc->uc_mcontext.gregs[VKI_ESP]) {
            tst->os_state.thrptr = uc->uc_mcontext.gregs[VKI_ESP];
            ML_(update_gdt_lwpgs)(tid);
         }
      }
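      /* Hypothetical shape of such a context (made-up sketch, not libc code):

            vki_ucontext_t uc;
            VG_(memset)(&uc, 0, sizeof(uc));
            uc.uc_flags = VKI_UC_CPU;
            uc.uc_mcontext.gregs[VKI_ESP] = (UWord)thrptr; // thread pointer

         i.e. a non-zero ESP slot is repurposed as the new thread pointer,
         which the code above forwards into the LWPGS descriptor. */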
      /* Ignore ERR and TRAPNO. */

      /* Segment registers */
      tst->arch.vex.guest_CS = uc->uc_mcontext.gregs[VKI_CS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_CS], OFFSET_x86_CS,
               sizeof(UShort));
      tst->arch.vex.guest_DS = uc->uc_mcontext.gregs[VKI_DS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_DS], OFFSET_x86_DS,
               sizeof(UShort));
      tst->arch.vex.guest_SS = uc->uc_mcontext.gregs[VKI_SS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_SS], OFFSET_x86_SS,
               sizeof(UShort));
      tst->arch.vex.guest_ES = uc->uc_mcontext.gregs[VKI_ES];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ES], OFFSET_x86_ES,
               sizeof(UShort));
      tst->arch.vex.guest_FS = uc->uc_mcontext.gregs[VKI_FS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_FS], OFFSET_x86_FS,
               sizeof(UShort));
      tst->arch.vex.guest_GS = uc->uc_mcontext.gregs[VKI_GS];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_GS], OFFSET_x86_GS,
               sizeof(UShort));

      /* Eflags */
      {
         UInt eflags;
         UInt orig_eflags;
         UInt new_eflags;
         Bool ok_restore = False;

         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_EFL])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_EFL], sizeof(UWord));
         eflags = uc->uc_mcontext.gregs[VKI_EFL];
         orig_eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
         new_eflags = eflags;

         /* The kernel disallows the ID flag to be changed via the setcontext
            call, thus do the same. */
         if (orig_eflags & VKI_EFLAGS_ID_BIT)
            new_eflags |= VKI_EFLAGS_ID_BIT;
         else
            new_eflags &= ~VKI_EFLAGS_ID_BIT;
         LibVEX_GuestX86_put_eflags(new_eflags, &tst->arch.vex);
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP2), sizeof(UWord));

         /* Check if this context was created by us in VG_(save_context).  In
            that case, try to restore the CC_OP, CC_DEP1, CC_DEP2 and CC_NDEP
            values which we previously stashed into unused members of the
            ucontext. */
         if (eflags != ~VKI_UC_GUEST_EFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                          "The eflags value was restored from an "
                          "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            UInt buf[4];
            UInt checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            checksum = ML_(fletcher32)((UShort*)&buf,
                                       sizeof(buf) / sizeof(UShort));
            if (checksum == VKI_UC_GUEST_EFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                             "The CC_* guest state values were fully "
                             "restored in thread %u.\n", tid);
               ok_restore = True;

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP2),
                        sizeof(UWord));
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                          "Cannot fully restore the CC_* guest state "
                          "values, using approximate eflags in thread "
                          "%u.\n", tid);
      }
   }
   /* FPU */
   if (uc->uc_flags & VKI_UC_FPU) {
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Flags and control words */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..x87_state)",
               (Addr)&fs->state, 28);
      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->state + 28 + i * 10;
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestX86State, guest_FPREG[i]), sizeof(ULong));
      }
      note = LibVEX_GuestX86_put_x87((UChar*)&fs->state, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring x87 state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* SSE */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      note = LibVEX_GuestX86_put_mxcsr(fs->mxcsr, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring mxcsr state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* XMM registers */
#define COPY_IN_XMM(src, dest) \
   do {                        \
      dest[0] = src._l[0];     \
      dest[1] = src._l[1];     \
      dest[2] = src._l[2];     \
      dest[3] = src._l[3];     \
   } while (0)
      COPY_IN_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestX86State, guest_XMM0), sizeof(U128));
      COPY_IN_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestX86State, guest_XMM1), sizeof(U128));
      COPY_IN_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestX86State, guest_XMM2), sizeof(U128));
      COPY_IN_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestX86State, guest_XMM3), sizeof(U128));
      COPY_IN_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestX86State, guest_XMM4), sizeof(U128));
      COPY_IN_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestX86State, guest_XMM5), sizeof(U128));
      COPY_IN_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestX86State, guest_XMM6), sizeof(U128));
      COPY_IN_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestX86State, guest_XMM7), sizeof(U128));
#undef COPY_IN_XMM
   }
}
/* Allocate GDT for a given thread. */
void ML_(setup_gdt)(VexGuestX86State *vex)
{
   Addr gdt = (Addr)VG_(calloc)("syswrap-solaris-x86.gdt",
                                VEX_GUEST_X86_GDT_NENT,
                                sizeof(VexGuestX86SegDescr));
   vex->guest_GDT = gdt;
}

/* Deallocate GDT for a given thread. */
void ML_(cleanup_gdt)(VexGuestX86State *vex)
{
   if (!vex->guest_GDT)
      return;
   VG_(free)((void *) (HWord) vex->guest_GDT);
   vex->guest_GDT = 0;
}

/* For a given thread, update the LWPGS descriptor in the thread's GDT
   according to the thread pointer. */
void ML_(update_gdt_lwpgs)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr base = tst->os_state.thrptr;
   VexGuestX86SegDescr *gdt
      = (VexGuestX86SegDescr *) (HWord) tst->arch.vex.guest_GDT;
   VexGuestX86SegDescr desc;

   vg_assert(gdt);

   VG_(memset)(&desc, 0, sizeof(desc));
   desc.LdtEnt.Bits.LimitLow = -1;
   desc.LdtEnt.Bits.LimitHi = -1;
   desc.LdtEnt.Bits.BaseLow = base & 0xffff;
   desc.LdtEnt.Bits.BaseMid = (base >> 16) & 0xff;
   desc.LdtEnt.Bits.BaseHi = (base >> 24) & 0xff;
   desc.LdtEnt.Bits.Pres = 1;
   desc.LdtEnt.Bits.Dpl = 3; /* SEL_UPL */
   desc.LdtEnt.Bits.Type = 19; /* SDT_MEMRWA */
   desc.LdtEnt.Bits.Granularity = 1; /* SDP_PAGES */
   desc.LdtEnt.Bits.Default_Big = 1; /* SDP_OP32 */

   gdt[VKI_GDT_LWPGS] = desc;

   tst->arch.vex.guest_GS = VKI_LWPGS_SEL;
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_GS,
            sizeof(UShort));
}
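/* Worked example for the descriptor above (sketch): with base = 0xfedc1234
   the base address splits as BaseLow = 0x1234, BaseMid = 0xdc and
   BaseHi = 0xfe, while LimitLow/LimitHi of all-ones with Granularity = 1
   (page units) makes the segment cover the full 4 GB address space. */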
/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name) DEFN_PRE_TEMPLATE(x86_solaris, name)
#define POST(name) DEFN_POST_TEMPLATE(x86_solaris, name)

PRE(sys_fstatat64)
{
   /* int fstatat64(int fildes, const char *path, struct stat64 *buf,
        int flag); */
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %ld )", SARG1, ARG2,
         (HChar *) ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "fstatat64", int, fildes, const char *, path,
                 struct stat64 *, buf, int, flag);
   if (ARG2)
      PRE_MEM_RASCIIZ("fstatat64(path)", ARG2);
   PRE_MEM_WRITE("fstatat64(buf)", ARG3, sizeof(struct vki_stat64));

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD &&
       !ML_(fd_allowed)(ARG1, "fstatat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat64));
}

PRE(sys_openat64)
{
   /* int openat64(int fildes, const char *filename, int flags);
      int openat64(int fildes, const char *filename, int flags,
                   mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG3 & VKI_O_CREAT) {
      /* 4-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld, %ld )", SARG1, ARG2,
            (HChar *) ARG2, SARG3, SARG4);
      PRE_REG_READ4(long, "openat64", int, fildes, const char *, filename,
                    int, flags, vki_mode_t, mode);
   }
   else {
      /* 3-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld )", SARG1, ARG2,
            (HChar *) ARG2, SARG3);
      PRE_REG_READ3(long, "openat64", int, fildes, const char *, filename,
                    int, flags);
   }

   PRE_MEM_RASCIIZ("openat64(filename)", ARG2);

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_openat64)
{
   if (!ML_(fd_allowed)(RES, "openat64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG2);
}

PRE(sys_llseek32)
{
   /* offset_t llseek(int fildes, offset_t offset, int whence); */
   PRINT("sys_llseek32 ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "llseek", int, fildes, vki_u32, offset_low,
                 vki_u32, offset_high, int, whence);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_mmap64)
{
   /* void *mmap64(void *addr, size_t len, int prot, int flags,
        int fildes, uint32_t offlo, uint32_t offhi); */
   /* Note this wrapper assumes a little-endian architecture, offlo and offhi
      have to be swapped if a big-endian architecture is present. */
#if !defined(VG_LITTLEENDIAN)
#error "Unexpected endianness."
#endif /* !VG_LITTLEENDIAN */
   SysRes r;
   ULong u;
   Off64T offset;

   vg_assert(VKI_PAGE_SIZE == 4096);
   vg_assert(sizeof(u) == sizeof(offset));

   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4, SARG5, ARG6, ARG7);
   PRE_REG_READ7(long, "mmap", void *, start, vki_size_t, length,
                 int, prot, int, flags, int, fd, uint32_t, offlo,
                 uint32_t, offhi);

   /* The offlo and offhi values can actually represent a negative value.
      Make sure it's passed correctly to the generic mmap wrapper. */
   u = ((ULong)ARG7 << 32) + ARG6;
   offset = *(Off64T*)&u;
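   /* Worked example (sketch): offlo = 0xfffff000 and offhi = 0xffffffff give
      u = 0xfffffffffffff000, i.e. offset = -4096, so a negative 64-bit
      offset arriving as two 32-bit halves survives the trip into the generic
      wrapper intact. */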

   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
   SET_STATUS_from_SysRes(r);
}
PRE(sys_stat64)
{
   /* int stat64(const char *path, struct stat64 *buf); */
   PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "stat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
   PRE_MEM_WRITE("stat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_stat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_lstat64)
{
   /* int lstat64(const char *path, struct stat64 *buf); */
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "lstat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
   PRE_MEM_WRITE("lstat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_lstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_fstat64)
{
   /* int fstat64(int fildes, struct stat64 *buf); */
   PRINT("sys_fstat64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstat64", int, fildes, struct stat64 *, buf);
   PRE_MEM_WRITE("fstat64(buf)", ARG2, sizeof(struct vki_stat64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

static void do_statvfs64_post(struct vki_statvfs64 *stats, ThreadId tid)
{
   POST_FIELD_WRITE(stats->f_bsize);
   POST_FIELD_WRITE(stats->f_frsize);
   POST_FIELD_WRITE(stats->f_blocks);
   POST_FIELD_WRITE(stats->f_bfree);
   POST_FIELD_WRITE(stats->f_bavail);
   POST_FIELD_WRITE(stats->f_files);
   POST_FIELD_WRITE(stats->f_ffree);
   POST_FIELD_WRITE(stats->f_favail);
   POST_FIELD_WRITE(stats->f_fsid);
   POST_MEM_WRITE((Addr) stats->f_basetype,
                  VG_(strlen)(stats->f_basetype) + 1);
   POST_FIELD_WRITE(stats->f_flag);
   POST_FIELD_WRITE(stats->f_namemax);
   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
}

PRE(sys_statvfs64)
{
   /* int statvfs64(const char *path, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_statvfs64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "statvfs64", const char *, path,
                 struct vki_statvfs64 *, buf);
   PRE_MEM_RASCIIZ("statvfs64(path)", ARG1);
   PRE_MEM_WRITE("statvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
}

POST(sys_statvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_fstatvfs64)
{
   /* int fstatvfs64(int fd, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_fstatvfs64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstatvfs64", int, fd, struct vki_statvfs64 *, buf);
   PRE_MEM_WRITE("fstatvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstatvfs64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_setrlimit64)
{
   /* int setrlimit64(int resource, struct rlimit64 *rlim); */
   struct vki_rlimit64 *limit = (struct vki_rlimit64 *)ARG2;
   PRINT("sys_setrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "setrlimit64", int, resource, struct rlimit64 *, rlim);
   PRE_MEM_READ("setrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));

   if (limit && limit->rlim_cur > limit->rlim_max)
      SET_STATUS_Failure(VKI_EINVAL);
   else if (ARG1 == VKI_RLIMIT_NOFILE) {
      if (limit->rlim_cur > VG_(fd_hard_limit) ||
          limit->rlim_max != VG_(fd_hard_limit)) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(fd_soft_limit) = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_DATA) {
      if (limit->rlim_cur > VG_(client_rlimit_data).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_data).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(client_rlimit_data).rlim_max = limit->rlim_max;
         VG_(client_rlimit_data).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_STACK && tid == 1) {
      if (limit->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_stack).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         /* Change the value of client_stack_szB to the rlim_cur value but
            only if it is smaller than the size of the allocated stack for the
            client. */
         if (limit->rlim_cur <= VG_(clstk_max_size))
            VG_(threads)[tid].client_stack_szB = limit->rlim_cur;

         VG_(client_rlimit_stack).rlim_max = limit->rlim_max;
         VG_(client_rlimit_stack).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
}

PRE(sys_getrlimit64)
{
   /* int getrlimit64(int resource, struct rlimit64 *rlim); */
   PRINT("sys_getrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getrlimit64",
                 int, resource, struct rlimit64 *, rlim);
   PRE_MEM_WRITE("getrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
}

POST(sys_getrlimit64)
{
   /* Based on common_post_getrlimit() from syswrap-generic.c. */
   struct vki_rlimit64 *rlim = (struct vki_rlimit64 *)ARG2;

   POST_MEM_WRITE(ARG2, sizeof(struct vki_rlimit64));

   switch (ARG1 /*resource*/) {
   case VKI_RLIMIT_NOFILE:
      rlim->rlim_cur = VG_(fd_soft_limit);
      rlim->rlim_max = VG_(fd_hard_limit);
      break;
   case VKI_RLIMIT_DATA:
      rlim->rlim_cur = VG_(client_rlimit_data).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_data).rlim_max;
      break;
   case VKI_RLIMIT_STACK:
      rlim->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_stack).rlim_max;
      break;
   }
}

PRE(sys_pread64)
{
   /* ssize32_t pread64(int fd, void *buf, size32_t count,
        uint32_t offset_1, uint32_t offset_2); */
   *flags |= SfMayBlock;
   PRINT("sys_pread64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pread64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_WRITE("pread64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pread64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_pread64)
{
   POST_MEM_WRITE(ARG2, RES);
}

PRE(sys_pwrite64)
{
   /* ssize32_t pwrite64(int fd, void *buf, size32_t count,
        uint32_t offset_1, uint32_t offset_2); */
   *flags |= SfMayBlock;
   PRINT("sys_pwrite64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pwrite64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_READ("pwrite64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pwrite64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_open64)
{
   /* int open64(const char *filename, int flags);
      int open64(const char *filename, int flags, mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG2 & VKI_O_CREAT) {
      /* 3-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx, %ld )", ARG1, (HChar *) ARG1, ARG2,
            SARG3);
      PRE_REG_READ3(long, "open64", const char *, filename, int, flags,
                    vki_mode_t, mode);
   }
   else {
      /* 2-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
      PRE_REG_READ2(long, "open64", const char *, filename, int, flags);
   }
   PRE_MEM_RASCIIZ("open(filename)", ARG1);
}

POST(sys_open64)
{
   if (!ML_(fd_allowed)(RES, "open64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG1);
}

#undef PRE
#undef POST

#endif // defined(VGP_x86_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/