/*--------------------------------------------------------------------*/
/*--- Platform-specific syscalls stuff.      syswrap-x86-solaris.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2011-2017 Petr Pavlu

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#if defined(VGP_x86_solaris)

#include "libvex_guest_offsets.h"
#include "pub_core_basics.h"
#include "pub_core_vki.h"
#include "pub_core_threadstate.h"
#include "pub_core_aspacemgr.h"
#include "pub_core_xarray.h"
#include "pub_core_clientstate.h"
#include "pub_core_debuglog.h"
#include "pub_core_libcassert.h"
#include "pub_core_libcbase.h"
#include "pub_core_libcfile.h"
#include "pub_core_libcprint.h"
#include "pub_core_libcsignal.h"
#include "pub_core_machine.h"           // VG_(get_SP)
#include "pub_core_mallocfree.h"
#include "pub_core_options.h"
#include "pub_core_tooliface.h"
#include "pub_core_signals.h"
#include "pub_core_syscall.h"
#include "pub_core_syswrap.h"

#include "priv_types_n_macros.h"
#include "priv_syswrap-generic.h"
#include "priv_syswrap-solaris.h"

/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
   use 'retaddr' as f's return-to address.  Also, clear all the integer
   registers before entering f. */
__attribute__((noreturn))
void ML_(call_on_new_stack_0_1)(Addr stack,             /* 4(%esp) */
                                Addr retaddr,           /* 8(%esp) */
                                void (*f)(Word),        /* 12(%esp) */
                                Word arg1);             /* 16(%esp) */
__asm__ (
".globl vgModuleLocal_call_on_new_stack_0_1\n"
"vgModuleLocal_call_on_new_stack_0_1:\n"
"   movl  %esp, %esi\n"         /* remember old stack pointer */
"   movl  4(%esi), %esp\n"      /* set stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl $0\n"                 /* align stack */
"   pushl 16(%esi)\n"           /* arg1 to stack */
"   pushl 8(%esi)\n"            /* retaddr to stack */
"   pushl 12(%esi)\n"           /* f to stack */
"   movl  $0, %eax\n"           /* zero all GP regs */
"   movl  $0, %ebx\n"
"   movl  $0, %ecx\n"
"   movl  $0, %edx\n"
"   movl  $0, %esi\n"
"   movl  $0, %edi\n"
"   movl  $0, %ebp\n"
"   ret\n"                      /* jump to f */
"   ud2\n"                      /* should never get here */
);

/* This function is called to set up a context of a new Valgrind thread (which
   will run the client code). */
void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
   UShort cs, ds, ss, es, fs, gs;

   VG_(memset)(uc, 0, sizeof(*uc));
   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;

   /* Start the thread with everything blocked. */
   VG_(sigfillset)(&uc->uc_sigmask);

   /* Set up the stack; it should always be 16-byte aligned before doing
      a function call, i.e. the first parameter is also 16-byte aligned. */
   vg_assert(VG_IS_16_ALIGNED(stack));
   stack -= 1;
   stack[0] = 0;          /* bogus return value */
   stack[1] = (UWord)tst; /* the parameter */
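
   /* Viewed from ML_(start_thread_NORETURN), stack[0] is the (never used)
      return address slot and stack[1] the first argument, i.e. a regular
      cdecl frame; per the assertion above, the argument slot keeps its
      16-byte alignment. */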

   /* Set up the registers. */
   uc->uc_mcontext.gregs[VKI_EIP] = (UWord)ML_(start_thread_NORETURN);
   uc->uc_mcontext.gregs[VKI_UESP] = (UWord)stack;

   /* Copy segment registers. */
   __asm__ __volatile__(
      "movw %%cs, %[cs]\n"
      "movw %%ds, %[ds]\n"
      "movw %%ss, %[ss]\n"
      "movw %%es, %[es]\n"
      "movw %%fs, %[fs]\n"
      "movw %%gs, %[gs]\n"
      : [cs] "=m" (cs), [ds] "=m" (ds), [ss] "=m" (ss), [es] "=m" (es),
        [fs] "=m" (fs), [gs] "=m" (gs));
   uc->uc_mcontext.gregs[VKI_CS] = cs;
   uc->uc_mcontext.gregs[VKI_DS] = ds;
   uc->uc_mcontext.gregs[VKI_SS] = ss;
   uc->uc_mcontext.gregs[VKI_ES] = es;
   uc->uc_mcontext.gregs[VKI_FS] = fs;
   uc->uc_mcontext.gregs[VKI_GS] = gs;
}

/* Architecture-specific part of VG_(save_context). */
void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                               CorePart part)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
   SizeT i;

   /* CPU */
   /* Common registers */
   uc->uc_mcontext.gregs[VKI_EIP] = tst->arch.vex.guest_EIP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EIP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EIP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EAX] = tst->arch.vex.guest_EAX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EAX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EAX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBX] = tst->arch.vex.guest_EBX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ECX] = tst->arch.vex.guest_ECX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ECX,
            (Addr)&uc->uc_mcontext.gregs[VKI_ECX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDX] = tst->arch.vex.guest_EDX;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDX,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDX], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EBP] = tst->arch.vex.guest_EBP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBP,
            (Addr)&uc->uc_mcontext.gregs[VKI_EBP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESI] = tst->arch.vex.guest_ESI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESI,
            (Addr)&uc->uc_mcontext.gregs[VKI_ESI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_EDI] = tst->arch.vex.guest_EDI;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDI,
            (Addr)&uc->uc_mcontext.gregs[VKI_EDI], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_UESP] = tst->arch.vex.guest_ESP;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESP,
            (Addr)&uc->uc_mcontext.gregs[VKI_UESP], sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_ESP] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ESP],
            sizeof(UWord));

   uc->uc_mcontext.gregs[VKI_ERR] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ERR],
            sizeof(UWord));
   uc->uc_mcontext.gregs[VKI_TRAPNO] = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_TRAPNO],
            sizeof(UWord));

   /* Segment registers */
   /* Note that segment registers are 16b in VEX, but 32b in mcontext.  Thus
      we tell a tool that the lower 16 bits were copied and that the higher 16
      bits were set (to zero).  (This assumes a little-endian architecture.) */
   uc->uc_mcontext.gregs[VKI_CS] = tst->arch.vex.guest_CS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_CS,
            (Addr)&uc->uc_mcontext.gregs[VKI_CS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_CS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_DS] = tst->arch.vex.guest_DS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_DS,
            (Addr)&uc->uc_mcontext.gregs[VKI_DS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_DS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_SS] = tst->arch.vex.guest_SS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_SS,
            (Addr)&uc->uc_mcontext.gregs[VKI_SS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_SS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_ES] = tst->arch.vex.guest_ES;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ES,
            (Addr)&uc->uc_mcontext.gregs[VKI_ES], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_ES]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_FS] = tst->arch.vex.guest_FS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_FS,
            (Addr)&uc->uc_mcontext.gregs[VKI_FS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_FS]) + 2, sizeof(UShort));
   uc->uc_mcontext.gregs[VKI_GS] = tst->arch.vex.guest_GS;
   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_GS,
            (Addr)&uc->uc_mcontext.gregs[VKI_GS], sizeof(UShort));
   VG_TRACK(post_mem_write, part, tid,
            (Addr)(&uc->uc_mcontext.gregs[VKI_GS]) + 2, sizeof(UShort));

   /* Handle eflags (optimistically make all flags defined). */
   uc->uc_mcontext.gregs[VKI_EFL] =
      LibVEX_GuestX86_get_eflags(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_EFL],
            sizeof(UWord));
   /* The LibVEX_GuestX86_get_eflags() call calculates the eflags value from
      the CC_OP, CC_DEP1, CC_DEP2, CC_NDEP, DFLAG, IDFLAG and ACFLAG guest
      state values.  The *FLAG values represent one-bit information and are
      saved without loss of precision into eflags.  However, when CC_* values
      are converted into eflags then precision is lost.  What we do here is to
      save the unmodified CC_* values into unused ucontext members (the 'long
      uc_filler[5]' and 'int fs->__pad[2]' arrays) so we can then restore the
      context in ML_(restore_machine_context)() without the loss of precision.
      This imposes a requirement on client programs to not use these two
      members.  Luckily this is never the case in Solaris-gate programs and
      libraries. */
   /* CC_OP and CC_NDEP are always defined, but we don't want to tell a tool
      that we just defined uc_filler[0,1].  This helps if someone uses an
      uninitialized ucontext and tries to read (use) uc_filler[0,1].  Memcheck
      in such a case should detect this error. */
   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
   /* We want to copy the shadow values of CC_DEP1 and CC_DEP2 so we have to
      tell a tool about this copy. */
   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP1),
            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
   VG_TRACK(copy_reg_to_mem, part, tid,
            offsetof(VexGuestX86State, guest_CC_DEP2),
            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
   /* Make another copy of eflags. */
   VKI_UC_GUEST_EFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_EFL];
   /* Calculate a checksum. */
   {
      UInt buf[5];
      UInt checksum;

      buf[0] = VKI_UC_GUEST_CC_OP(uc);
      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
      buf[4] = uc->uc_mcontext.gregs[VKI_EFL];
      checksum = ML_(fletcher32)((UShort*)&buf, sizeof(buf) / sizeof(UShort));
      /* Store the checksum. */
      VKI_UC_GUEST_EFLAGS_CHECKSUM(uc) = checksum;
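
      /* Two independent marks are kept: VKI_UC_GUEST_EFLAGS_NEG holds the
         bitwise complement of the saved eflags and VKI_UC_GUEST_EFLAGS_CHECKSUM
         a Fletcher-32 checksum over the stashed CC_* values plus eflags.
         ML_(restore_machine_context)() accepts the stashed CC_* values only
         when both match, and otherwise falls back to the lossy eflags value. */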
   }

   /* FPU */
   /* x87 */
   vg_assert(sizeof(fs->state) == 108);
   LibVEX_GuestX86_get_x87(&tst->arch.vex, (UChar*)&fs->state);

   /* Flags and control words */
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->state, 28);
   /* ST registers */
   for (i = 0; i < 8; i++) {
      Addr addr = (Addr)&fs->state + 28 + i * 10;
      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
         have to lie here. :< */
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr, sizeof(ULong));
      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
               guest_FPREG[i]), addr + 8, sizeof(UShort));
   }

   /* Status word (sw) at exception */
   fs->status = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));

   /* SSE */
   fs->mxcsr = LibVEX_GuestX86_get_mxcsr(&tst->arch.vex);
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));

   /* MXCSR at exception */
   fs->xstatus = 0;
   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
            sizeof(fs->xstatus));

   /* XMM registers */
#define COPY_OUT_XMM(dest, src) \
   do {                         \
      dest._l[0] = src[0];      \
      dest._l[1] = src[1];      \
      dest._l[2] = src[2];      \
      dest._l[3] = src[3];      \
   } while (0)
   COPY_OUT_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM0), (Addr)&fs->xmm[0], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM1), (Addr)&fs->xmm[1], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM2), (Addr)&fs->xmm[2], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM3), (Addr)&fs->xmm[3], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM4), (Addr)&fs->xmm[4], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM5), (Addr)&fs->xmm[5], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM6), (Addr)&fs->xmm[6], sizeof(U128));
   COPY_OUT_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
            guest_XMM7), (Addr)&fs->xmm[7], sizeof(U128));
#undef COPY_OUT_XMM
}

/* Architecture-specific part of VG_(restore_context). */
void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
                                  CorePart part, Bool esp_is_thrptr)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   struct vki_fpchip_state *fs
      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;

   /* CPU */
   if (uc->uc_flags & VKI_UC_CPU) {
      /* Common registers */
      tst->arch.vex.guest_EIP = uc->uc_mcontext.gregs[VKI_EIP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EIP], OFFSET_x86_EIP,
               sizeof(UWord));
      tst->arch.vex.guest_EAX = uc->uc_mcontext.gregs[VKI_EAX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EAX], OFFSET_x86_EAX,
               sizeof(UWord));
      tst->arch.vex.guest_EBX = uc->uc_mcontext.gregs[VKI_EBX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBX], OFFSET_x86_EBX,
               sizeof(UWord));
      tst->arch.vex.guest_ECX = uc->uc_mcontext.gregs[VKI_ECX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ECX], OFFSET_x86_ECX,
               sizeof(UWord));
      tst->arch.vex.guest_EDX = uc->uc_mcontext.gregs[VKI_EDX];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDX], OFFSET_x86_EDX,
               sizeof(UWord));
      tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EBP], OFFSET_x86_EBP,
               sizeof(UWord));
      tst->arch.vex.guest_ESI = uc->uc_mcontext.gregs[VKI_ESI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_ESI], OFFSET_x86_ESI,
               sizeof(UWord));
      tst->arch.vex.guest_EDI = uc->uc_mcontext.gregs[VKI_EDI];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_EDI], OFFSET_x86_EDI,
               sizeof(UWord));
      tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
      VG_TRACK(copy_mem_to_reg, part, tid,
               (Addr)&uc->uc_mcontext.gregs[VKI_UESP], OFFSET_x86_ESP,
               sizeof(UWord));

      if (esp_is_thrptr) {
         /* The thrptr value is passed by libc to the kernel in the otherwise
            unused ESP field.  This is used when a new thread is created. */
         VG_TRACK(pre_mem_read, part, tid,
                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_ESP])",
                  (Addr)&uc->uc_mcontext.gregs[VKI_ESP], sizeof(UWord));
         if (uc->uc_mcontext.gregs[VKI_ESP]) {
            tst->os_state.thrptr = uc->uc_mcontext.gregs[VKI_ESP];
            ML_(update_gdt_lwpgs)(tid);
         }
      }
396 /* Segment registers */
397 tst
->arch
.vex
.guest_CS
= uc
->uc_mcontext
.gregs
[VKI_CS
];
398 VG_TRACK(copy_mem_to_reg
, part
, tid
,
399 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_CS
], OFFSET_x86_CS
,
401 tst
->arch
.vex
.guest_DS
= uc
->uc_mcontext
.gregs
[VKI_DS
];
402 VG_TRACK(copy_mem_to_reg
, part
, tid
,
403 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_DS
], OFFSET_x86_DS
,
405 tst
->arch
.vex
.guest_SS
= uc
->uc_mcontext
.gregs
[VKI_SS
];
406 VG_TRACK(copy_mem_to_reg
, part
, tid
,
407 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_SS
], OFFSET_x86_SS
,
409 tst
->arch
.vex
.guest_ES
= uc
->uc_mcontext
.gregs
[VKI_ES
];
410 VG_TRACK(copy_mem_to_reg
, part
, tid
,
411 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_ES
], OFFSET_x86_ES
,
413 tst
->arch
.vex
.guest_FS
= uc
->uc_mcontext
.gregs
[VKI_FS
];
414 VG_TRACK(copy_mem_to_reg
, part
, tid
,
415 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_FS
], OFFSET_x86_FS
,
417 tst
->arch
.vex
.guest_GS
= uc
->uc_mcontext
.gregs
[VKI_GS
];
418 VG_TRACK(copy_mem_to_reg
, part
, tid
,
419 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_GS
], OFFSET_x86_GS
,
427 Bool ok_restore
= False
;
429 VG_TRACK(pre_mem_read
, part
, tid
,
430 "restore_machine_context(uc->uc_mcontext.gregs[VKI_EFL])",
431 (Addr
)&uc
->uc_mcontext
.gregs
[VKI_EFL
], sizeof(UWord
));
432 eflags
= uc
->uc_mcontext
.gregs
[VKI_EFL
];
433 orig_eflags
= LibVEX_GuestX86_get_eflags(&tst
->arch
.vex
);
435 /* The kernel disallows the ID flag to be changed via the setcontext
436 call, thus do the same. */
437 if (orig_eflags
& VKI_EFLAGS_ID_BIT
)
438 new_eflags
|= VKI_EFLAGS_ID_BIT
;
440 new_eflags
&= ~VKI_EFLAGS_ID_BIT
;
441 LibVEX_GuestX86_put_eflags(new_eflags
, &tst
->arch
.vex
);
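
         /* LibVEX_GuestX86_put_eflags() recomputes the whole CC_* flag thunk
            in the guest state, so the tool is told below that CC_DEP1 and
            CC_DEP2 now hold new, defined values. */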
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UWord));
         VG_TRACK(post_reg_write, part, tid,
                  offsetof(VexGuestX86State, guest_CC_DEP2), sizeof(UWord));

         /* Check if this context was created by us in VG_(save_context).  In
            that case, try to restore the CC_OP, CC_DEP1, CC_DEP2 and CC_NDEP
            values which we previously stashed into unused members of the
            context. */
         if (eflags != ~VKI_UC_GUEST_EFLAGS_NEG(uc)) {
            VG_(debugLog)(1, "syswrap-solaris",
                          "The eflags value was restored from an "
                          "explicitly set value in thread %u.\n", tid);
            ok_restore = True;
         }
         else {
            UInt buf[5];
            UInt checksum;

            buf[0] = VKI_UC_GUEST_CC_OP(uc);
            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
            buf[4] = eflags;
            checksum = ML_(fletcher32)((UShort*)&buf,
                                       sizeof(buf) / sizeof(UShort));
            if (checksum == VKI_UC_GUEST_EFLAGS_CHECKSUM(uc)) {
               /* Check ok, the full restoration is possible. */
               VG_(debugLog)(1, "syswrap-solaris",
                             "The CC_* guest state values were fully "
                             "restored in thread %u.\n", tid);

               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP1),
                        sizeof(UWord));
               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
               VG_TRACK(copy_mem_to_reg, part, tid,
                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
                        offsetof(VexGuestX86State, guest_CC_DEP2),
                        sizeof(UWord));
               ok_restore = True;
            }
         }

         if (!ok_restore)
            VG_(debugLog)(1, "syswrap-solaris",
                          "Cannot fully restore the CC_* guest state "
                          "values, using approximate eflags in thread "
                          "%u.\n", tid);
      }
   }

   if (uc->uc_flags & VKI_UC_FPU) {
      /* FPU */
      VexEmNote note;
      SizeT i;

      /* x87 */
      /* Flags and control words */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..x87_state)",
               (Addr)&fs->state, 28);
      /* ST registers */
      for (i = 0; i < 8; i++) {
         Addr addr = (Addr)&fs->state + 28 + i * 10;
         VG_TRACK(copy_mem_to_reg, part, tid, addr,
                  offsetof(VexGuestX86State, guest_FPREG[i]), sizeof(ULong));
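         /* Mirrors the save side: only the low 64 bits of each 80-bit x87
            register have a VEX counterpart, so the top 16 bits are not
            copied back. */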
      }
      note = LibVEX_GuestX86_put_x87((UChar*)&fs->state, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring x87 state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* SSE */
      VG_TRACK(pre_mem_read, part, tid,
               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
      note = LibVEX_GuestX86_put_mxcsr(fs->mxcsr, &tst->arch.vex);
      if (note != EmNote_NONE)
         VG_(message)(Vg_UserMsg,
                      "Error restoring mxcsr state in thread %u: %s.\n",
                      tid, LibVEX_EmNote_string(note));

      /* XMM registers */
#define COPY_IN_XMM(src, dest) \
   do {                        \
      dest[0] = src._l[0];     \
      dest[1] = src._l[1];     \
      dest[2] = src._l[2];     \
      dest[3] = src._l[3];     \
   } while (0)
      COPY_IN_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
               offsetof(VexGuestX86State, guest_XMM0), sizeof(U128));
      COPY_IN_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
               offsetof(VexGuestX86State, guest_XMM1), sizeof(U128));
      COPY_IN_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
               offsetof(VexGuestX86State, guest_XMM2), sizeof(U128));
      COPY_IN_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
               offsetof(VexGuestX86State, guest_XMM3), sizeof(U128));
      COPY_IN_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
               offsetof(VexGuestX86State, guest_XMM4), sizeof(U128));
      COPY_IN_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
               offsetof(VexGuestX86State, guest_XMM5), sizeof(U128));
      COPY_IN_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
               offsetof(VexGuestX86State, guest_XMM6), sizeof(U128));
      COPY_IN_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
               offsetof(VexGuestX86State, guest_XMM7), sizeof(U128));
#undef COPY_IN_XMM
   }
}

/* Allocate GDT for a given thread. */
void ML_(setup_gdt)(VexGuestX86State *vex)
{
   Addr gdt = (Addr)VG_(calloc)("syswrap-solaris-x86.gdt",
                                VEX_GUEST_X86_GDT_NENT,
                                sizeof(VexGuestX86SegDescr));
   vex->guest_GDT = gdt;
}

/* Deallocate GDT for a given thread. */
void ML_(cleanup_gdt)(VexGuestX86State *vex)
{
   if (!vex->guest_GDT)
      return;
   VG_(free)((void *) (HWord) vex->guest_GDT);
   vex->guest_GDT = 0;
}

/* For a given thread, update the LWPGS descriptor in the thread's GDT
   according to the thread pointer. */
void ML_(update_gdt_lwpgs)(ThreadId tid)
{
   ThreadState *tst = VG_(get_ThreadState)(tid);
   Addr base = tst->os_state.thrptr;
   VexGuestX86SegDescr *gdt
      = (VexGuestX86SegDescr *) (HWord) tst->arch.vex.guest_GDT;
   VexGuestX86SegDescr desc;

   VG_(memset)(&desc, 0, sizeof(desc));

   desc.LdtEnt.Bits.LimitLow = -1;
   desc.LdtEnt.Bits.LimitHi = -1;
   desc.LdtEnt.Bits.BaseLow = base & 0xffff;
   desc.LdtEnt.Bits.BaseMid = (base >> 16) & 0xff;
   desc.LdtEnt.Bits.BaseHi = (base >> 24) & 0xff;
   desc.LdtEnt.Bits.Pres = 1;
   desc.LdtEnt.Bits.Dpl = 3;            /* SEL_UPL */
   desc.LdtEnt.Bits.Type = 19;          /* SDT_MEMRWA */
   desc.LdtEnt.Bits.Granularity = 1;    /* SDP_PAGES */
   desc.LdtEnt.Bits.Default_Big = 1;    /* SDP_OP32 */
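
   /* An all-ones limit together with page-unit granularity makes the segment
      cover the whole 4 GiB address space; the per-thread base address is
      what makes %gs-relative accesses reach this thread's thrptr area. */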

   gdt[VKI_GDT_LWPGS] = desc;

   tst->arch.vex.guest_GS = VKI_LWPGS_SEL;
   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_GS,
            sizeof(UShort));
}

/* ---------------------------------------------------------------------
   PRE/POST wrappers for x86/Solaris-specific syscalls
   ------------------------------------------------------------------ */

#define PRE(name)  DEFN_PRE_TEMPLATE(x86_solaris, name)
#define POST(name) DEFN_POST_TEMPLATE(x86_solaris, name)

PRE(sys_fstatat64)
{
   /* int fstatat64(int fildes, const char *path, struct stat64 *buf,
        int flag); */
   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %ld )", SARG1, ARG2,
         (HChar*)ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "fstatat64", int, fildes, const char *, path,
                 struct stat64 *, buf, int, flag);

   PRE_MEM_RASCIIZ("fstatat64(path)", ARG2);
   PRE_MEM_WRITE("fstatat64(buf)", ARG3, sizeof(struct vki_stat64));

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD &&
       !ML_(fd_allowed)(ARG1, "fstatat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatat64)
{
   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat64));
}

PRE(sys_openat64)
{
   /* int openat64(int fildes, const char *filename, int flags);
      int openat64(int fildes, const char *filename, int flags,
                   mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG3 & VKI_O_CREAT) {
      /* 4-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld, %ld )", SARG1, ARG2,
            (HChar*)ARG2, SARG3, SARG4);
      PRE_REG_READ4(long, "openat64", int, fildes, const char *, filename,
                    int, flags, vki_mode_t, mode);
   }
   else {
      /* 3-arg version */
      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld )", SARG1, ARG2, (HChar*)ARG2,
            SARG3);
      PRE_REG_READ3(long, "openat64", int, fildes, const char *, filename,
                    int, flags);
   }

   PRE_MEM_RASCIIZ("openat64(filename)", ARG2);

   /* Be strict. */
   if (ARG1 != VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_openat64)
{
   if (!ML_(fd_allowed)(RES, "openat64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
}

PRE(sys_llseek32)
{
   /* offset_t llseek(int fildes, offset_t offset, int whence); */
   PRINT("sys_llseek32 ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
   PRE_REG_READ4(long, "llseek", int, fildes, vki_u32, offset_low,
                 vki_u32, offset_high, int, whence);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_mmap64)
{
   /* void *mmap64(void *addr, size_t len, int prot, int flags,
        int fildes, uint32_t offlo, uint32_t offhi); */
   /* Note that this wrapper assumes a little-endian architecture; offlo and
      offhi have to be swapped if a big-endian architecture is present. */
#if !defined(VG_LITTLEENDIAN)
#error "Unexpected endianness."
#endif /* !VG_LITTLEENDIAN */

   SysRes r;
   ULong u;
   Off64T offset;

   vg_assert(VKI_PAGE_SIZE == 4096);
   vg_assert(sizeof(u) == sizeof(offset));

   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx, %#lx )",
         ARG1, ARG2, ARG3, ARG4, SARG5, ARG6, ARG7);
   PRE_REG_READ7(long, "mmap", void *, start, vki_size_t, length,
                 int, prot, int, flags, int, fd, uint32_t, offlo,
                 uint32_t, offhi);

   /* The offlo and offhi values can actually represent a negative value.
      Make sure it's passed correctly to the generic mmap wrapper. */
   u = ((ULong)ARG7 << 32) + ARG6;
   offset = *(Off64T*)&u;
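
   /* Reinterpreting the bit pattern as Off64T, instead of converting the
      ULong value arithmetically, keeps offsets with the high bit set
      negative, as intended. */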

   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
   SET_STATUS_from_SysRes(r);
}

PRE(sys_stat64)
{
   /* int stat64(const char *path, struct stat64 *buf); */
   PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "stat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
   PRE_MEM_WRITE("stat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_stat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_lstat64)
{
   /* int lstat64(const char *path, struct stat64 *buf); */
   PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
   PRE_REG_READ2(long, "lstat64", const char *, path, struct stat64 *, buf);

   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
   PRE_MEM_WRITE("lstat64(buf)", ARG2, sizeof(struct vki_stat64));
}

POST(sys_lstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

PRE(sys_fstat64)
{
   /* int fstat64(int fildes, struct stat64 *buf); */
   PRINT("sys_fstat64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstat64", int, fildes, struct stat64 *, buf);
   PRE_MEM_WRITE("fstat64(buf)", ARG2, sizeof(struct vki_stat64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstat64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstat64)
{
   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
}

static void do_statvfs64_post(struct vki_statvfs64 *stats, ThreadId tid)
{
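   /* The fields are marked one at a time rather than with one POST_MEM_WRITE
      over the whole struct, so any padding between them is not marked as
      written, and the two strings are marked only up to their terminating
      null bytes. */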
   POST_FIELD_WRITE(stats->f_bsize);
   POST_FIELD_WRITE(stats->f_frsize);
   POST_FIELD_WRITE(stats->f_blocks);
   POST_FIELD_WRITE(stats->f_bfree);
   POST_FIELD_WRITE(stats->f_bavail);
   POST_FIELD_WRITE(stats->f_files);
   POST_FIELD_WRITE(stats->f_ffree);
   POST_FIELD_WRITE(stats->f_favail);
   POST_FIELD_WRITE(stats->f_fsid);
   POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
   POST_FIELD_WRITE(stats->f_flag);
   POST_FIELD_WRITE(stats->f_namemax);
   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
}

PRE(sys_statvfs64)
{
   /* int statvfs64(const char *path, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_statvfs64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
   PRE_REG_READ2(long, "statvfs64", const char *, path,
                 struct vki_statvfs64 *, buf);
   PRE_MEM_RASCIIZ("statvfs64(path)", ARG1);
   PRE_MEM_WRITE("statvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
}

POST(sys_statvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_fstatvfs64)
{
   /* int fstatvfs64(int fd, struct statvfs64 *buf); */
   *flags |= SfMayBlock;
   PRINT("sys_fstatvfs64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "fstatvfs64", int, fd, struct vki_statvfs64 *, buf);
   PRE_MEM_WRITE("fstatvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "fstatvfs64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_fstatvfs64)
{
   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
}

PRE(sys_setrlimit64)
{
   /* int setrlimit64(int resource, struct rlimit64 *rlim); */
   struct vki_rlimit64 *limit = (struct vki_rlimit64 *)ARG2;
   PRINT("sys_setrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "setrlimit64", int, resource, struct rlimit64 *, rlim);
   PRE_MEM_READ("setrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));

   if (limit && limit->rlim_cur > limit->rlim_max)
      SET_STATUS_Failure(VKI_EINVAL);
   else if (ARG1 == VKI_RLIMIT_NOFILE) {
      if (limit->rlim_cur > VG_(fd_hard_limit) ||
          limit->rlim_max != VG_(fd_hard_limit)) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(fd_soft_limit) = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_DATA) {
      if (limit->rlim_cur > VG_(client_rlimit_data).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_data).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         VG_(client_rlimit_data).rlim_max = limit->rlim_max;
         VG_(client_rlimit_data).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
   else if (ARG1 == VKI_RLIMIT_STACK && tid == 1) {
      if (limit->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
          limit->rlim_max > VG_(client_rlimit_stack).rlim_max) {
         SET_STATUS_Failure(VKI_EPERM);
      }
      else {
         /* Change the value of client_stack_szB to the rlim_cur value but
            only if it is smaller than the size of the allocated stack for
            the client. */
         if (limit->rlim_cur <= VG_(clstk_max_size))
            VG_(threads)[tid].client_stack_szB = limit->rlim_cur;

         VG_(client_rlimit_stack).rlim_max = limit->rlim_max;
         VG_(client_rlimit_stack).rlim_cur = limit->rlim_cur;
         SET_STATUS_Success(0);
      }
   }
}

PRE(sys_getrlimit64)
{
   /* int getrlimit64(int resource, struct rlimit64 *rlim); */
   PRINT("sys_getrlimit64 ( %ld, %#lx )", SARG1, ARG2);
   PRE_REG_READ2(long, "getrlimit64",
                 int, resource, struct rlimit64 *, rlim);
   PRE_MEM_WRITE("getrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
}

POST(sys_getrlimit64)
{
   /* Based on common_post_getrlimit() from syswrap-generic.c. */
   struct vki_rlimit64 *rlim = (struct vki_rlimit64 *)ARG2;

   POST_MEM_WRITE(ARG2, sizeof(struct vki_rlimit64));

   switch (ARG1 /*resource*/) {
   case VKI_RLIMIT_NOFILE:
      rlim->rlim_cur = VG_(fd_soft_limit);
      rlim->rlim_max = VG_(fd_hard_limit);
      break;
   case VKI_RLIMIT_DATA:
      rlim->rlim_cur = VG_(client_rlimit_data).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_data).rlim_max;
      break;
   case VKI_RLIMIT_STACK:
      rlim->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
      rlim->rlim_max = VG_(client_rlimit_stack).rlim_max;
      break;
   }
}

PRE(sys_pread64)
{
   /* ssize32_t pread64(int fd, void *buf, size32_t count,
                        uint32_t offset_1, uint32_t offset_2); */
   *flags |= SfMayBlock;
   PRINT("sys_pread64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pread64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_WRITE("pread64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pread64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

POST(sys_pread64)
{
   POST_MEM_WRITE(ARG2, RES);
}

PRE(sys_pwrite64)
{
   /* ssize32_t pwrite64(int fd, void *buf, size32_t count,
                         uint32_t offset_1, uint32_t offset_2); */
   *flags |= SfMayBlock;
   PRINT("sys_pwrite64 ( %ld, %#lx, %lu, %#lx, %#lx )",
         SARG1, ARG2, ARG3, ARG4, ARG5);
   PRE_REG_READ5(long, "pwrite64", int, fd, void *, buf, vki_size32_t, count,
                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
   PRE_MEM_READ("pwrite64(buf)", ARG2, ARG3);

   /* Be strict. */
   if (!ML_(fd_allowed)(ARG1, "pwrite64", tid, False))
      SET_STATUS_Failure(VKI_EBADF);
}

PRE(sys_open64)
{
   /* int open64(const char *filename, int flags);
      int open64(const char *filename, int flags, mode_t mode); */
   *flags |= SfMayBlock;

   if (ARG2 & VKI_O_CREAT) {
      /* 3-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx, %ld )", ARG1, (HChar*)ARG1, ARG2,
            SARG3);
      PRE_REG_READ3(long, "open64", const char *, filename, int, flags,
                    vki_mode_t, mode);
   }
   else {
      /* 2-arg version */
      PRINT("sys_open64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
      PRE_REG_READ2(long, "open64", const char *, filename, int, flags);
   }
   PRE_MEM_RASCIIZ("open64(filename)", ARG1);
}

POST(sys_open64)
{
   if (!ML_(fd_allowed)(RES, "open64", tid, True)) {
      VG_(close)(RES);
      SET_STATUS_Failure(VKI_EMFILE);
   }
   else if (VG_(clo_track_fds))
      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
}

#endif // defined(VGP_x86_solaris)

/*--------------------------------------------------------------------*/
/*--- end                                                          ---*/
/*--------------------------------------------------------------------*/