2 /*--------------------------------------------------------------------*/
3 /*--- Platform-specific syscalls stuff. syswrap-x86-linux.c ---*/
4 /*--------------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2000-2017 Nicholas Nethercote
11 njn@valgrind.org
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
28 The GNU General Public License is contained in the file COPYING.
31 #if defined(VGP_x86_linux)
33 /* TODO/FIXME jrs 20050207: assignments to the syscall return result
34 in interrupted_syscall() need to be reviewed. They don't seem
35 to assign the shadow state.
38 #include "pub_core_basics.h"
39 #include "pub_core_vki.h"
40 #include "pub_core_vkiscnums.h"
41 #include "pub_core_threadstate.h"
42 #include "pub_core_aspacemgr.h"
43 #include "pub_core_debuglog.h"
44 #include "pub_core_libcbase.h"
45 #include "pub_core_libcassert.h"
46 #include "pub_core_libcprint.h"
47 #include "pub_core_libcproc.h"
48 #include "pub_core_libcsignal.h"
49 #include "pub_core_mallocfree.h"
50 #include "pub_core_options.h"
51 #include "pub_core_scheduler.h"
52 #include "pub_core_sigframe.h" // For VG_(sigframe_destroy)()
53 #include "pub_core_signals.h"
54 #include "pub_core_syscall.h"
55 #include "pub_core_syswrap.h"
56 #include "pub_core_tooliface.h"
58 #include "priv_types_n_macros.h"
59 #include "priv_syswrap-generic.h" /* for decls of generic wrappers */
60 #include "priv_syswrap-linux.h" /* for decls of linux-ish wrappers */
61 #include "priv_syswrap-linux-variants.h" /* decls of linux variant wrappers */
62 #include "priv_syswrap-main.h"
65 /* ---------------------------------------------------------------------
66 clone() handling
67 ------------------------------------------------------------------ */
69 /* Call f(arg1), but first switch stacks, using 'stack' as the new
70 stack, and use 'retaddr' as f's return-to address. Also, clear all
71 the integer registers before entering f.*/
72 __attribute__((noreturn))
73 void ML_(call_on_new_stack_0_1) ( Addr stack,
74 Addr retaddr,
75 void (*f)(Word),
76 Word arg1 );
77 // 4(%esp) == stack
78 // 8(%esp) == retaddr
79 // 12(%esp) == f
80 // 16(%esp) == arg1
81 asm(
82 ".text\n"
83 ".globl vgModuleLocal_call_on_new_stack_0_1\n"
84 "vgModuleLocal_call_on_new_stack_0_1:\n"
85 " movl %esp, %esi\n" // remember old stack pointer
86 " movl 4(%esi), %esp\n" // set stack, assume %esp is now 16-byte aligned
87 " subl $12, %esp\n" // skip 12 bytes
88 " pushl 16(%esi)\n" // arg1 to stack, %esp is 16-byte aligned
89 " pushl 8(%esi)\n" // retaddr to stack
90 " pushl 12(%esi)\n" // f to stack
91 " movl $0, %eax\n" // zero all GP regs
92 " movl $0, %ebx\n"
93 " movl $0, %ecx\n"
94 " movl $0, %edx\n"
95 " movl $0, %esi\n"
96 " movl $0, %edi\n"
97 " movl $0, %ebp\n"
98 " ret\n" // jump to f
99 " ud2\n" // should never get here
100 ".previous\n"
105 Perform a clone system call. clone is strange because it has
106 fork()-like return-twice semantics, so it needs special
107 handling here.
109 Upon entry, we have:
111 int (fn)(void*) in 0+FSZ(%esp)
112 void* child_stack in 4+FSZ(%esp)
113 int flags in 8+FSZ(%esp)
114 void* arg in 12+FSZ(%esp)
115 pid_t* child_tid in 16+FSZ(%esp)
116 pid_t* parent_tid in 20+FSZ(%esp)
117 void* tls_ptr in 24+FSZ(%esp)
119 System call requires:
121 int $__NR_clone in %eax
122 int flags in %ebx
123 void* child_stack in %ecx
124 pid_t* parent_tid in %edx
125 pid_t* child_tid in %edi
126 void* tls_ptr in %esi
128 Returns an Int encoded in the linux-x86 way, not a SysRes.
130 #define FSZ "4+4+4+4" /* frame size = retaddr+ebx+edi+esi */
131 #define __NR_CLONE VG_STRINGIFY(__NR_clone)
132 #define __NR_EXIT VG_STRINGIFY(__NR_exit)
134 // See priv_syswrap-linux.h for arg profile.
135 asm(
136 ".text\n"
137 ".globl do_syscall_clone_x86_linux\n"
138 "do_syscall_clone_x86_linux:\n"
139 " push %ebx\n"
140 " push %edi\n"
141 " push %esi\n"
143 /* set up child stack with function and arg */
144 " movl 4+"FSZ"(%esp), %ecx\n" /* syscall arg2: child stack */
145 " movl 12+"FSZ"(%esp), %ebx\n" /* fn arg */
146 " movl 0+"FSZ"(%esp), %eax\n" /* fn */
147 " andl $-16, %ecx\n" /* align to 16-byte */
148 " lea -20(%ecx), %ecx\n" /* allocate 16*n+4 bytes on stack */
149 " movl %ebx, 4(%ecx)\n" /* fn arg */
150 " movl %eax, 0(%ecx)\n" /* fn */
152 /* get other args to clone */
153 " movl 8+"FSZ"(%esp), %ebx\n" /* syscall arg1: flags */
154 " movl 20+"FSZ"(%esp), %edx\n" /* syscall arg3: parent tid * */
155 " movl 16+"FSZ"(%esp), %edi\n" /* syscall arg5: child tid * */
156 " movl 24+"FSZ"(%esp), %esi\n" /* syscall arg4: tls_ptr * */
157 " movl $"__NR_CLONE", %eax\n"
158 " int $0x80\n" /* clone() */
159 " testl %eax, %eax\n" /* child if retval == 0 */
160 " jnz 1f\n"
162 /* CHILD - call thread function */
163 " popl %eax\n" /* child %esp is 16-byte aligned */
164 " call *%eax\n" /* call fn */
166 /* exit with result */
167 " movl %eax, %ebx\n" /* arg1: return value from fn */
168 " movl $"__NR_EXIT", %eax\n"
169 " int $0x80\n"
171 /* Hm, exit returned */
172 " ud2\n"
174 "1:\n" /* PARENT or ERROR */
175 " pop %esi\n"
176 " pop %edi\n"
177 " pop %ebx\n"
178 " ret\n"
179 ".previous\n"
182 #undef FSZ
183 #undef __NR_CLONE
184 #undef __NR_EXIT
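/* Illustrative only -- a hypothetical caller, assuming the argument profile
   documented above (the authoritative declaration is in priv_syswrap-linux.h):

      Int ret = do_syscall_clone_x86_linux(
                   start_thread_fn,          // fn: child entry point (assumed name)
                   child_stack_high_addr,    // child_stack (assumed name)
                   VKI_CLONE_VM | VKI_CLONE_FS | VKI_CLONE_FILES | ..., // flags
                   arg,                      // passed through to fn
                   &child_tid, &parent_tid,
                   tls_ptr );

   In the parent, ret is the new thread's pid/tid, or a negative errno encoded
   the linux-x86 way.  In the child, fn(arg) runs on the fresh stack and its
   return value is handed to exit(). */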
187 /* ---------------------------------------------------------------------
188 LDT/GDT simulation
189 ------------------------------------------------------------------ */
191 /* Details of the LDT simulation
192 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
194 When a program runs natively, the linux kernel allows each *thread*
195 in it to have its own LDT. Almost all programs never do this --
196 it's wildly unportable, after all -- and so the kernel never
197 allocates the structure, which is just as well as an LDT occupies
198 64k of memory (8192 entries of size 8 bytes).
200 A thread may choose to modify its LDT entries, by doing the
201 __NR_modify_ldt syscall. In such a situation the kernel will then
202 allocate an LDT structure for it. Each LDT entry is basically a
203 (base, limit) pair. A virtual address in a specific segment is
204 translated to a linear address by adding the segment's base value.
205 In addition, the virtual address must not exceed the limit value.
207 To use an LDT entry, a thread loads one of the segment registers
208 (%cs, %ss, %ds, %es, %fs, %gs) with the index of the LDT entry (0
209 .. 8191) it wants to use. In fact, the required value is (index <<
210 3) + 7, but that's not important right now. Any normal instruction
211 which includes an addressing mode can then be made relative to that
212 LDT entry by prefixing the insn with a so-called segment-override
213 prefix, a byte which indicates which of the 6 segment registers
214 holds the LDT index.
216 Now, a key constraint is that valgrind's address checks operate in
217 terms of linear addresses. So we have to explicitly translate
218 virtual addrs into linear addrs, and that means doing a complete
219 LDT simulation.
221 Calls to modify_ldt are intercepted. For each thread, we maintain
222 an LDT (with the same normally-never-allocated optimisation that
223 the kernel does). This is updated as expected via calls to
224 modify_ldt.
226 When a thread does an amode calculation involving a segment
227 override prefix, the relevant LDT entry for the thread is
228 consulted. It all works.
230 There is a conceptual problem, which appears when switching back to
231 native execution, either temporarily to pass syscalls to the
232 kernel, or permanently, when debugging V. Problem at such points
233 is that it's pretty pointless to copy the simulated machine's
234 segment registers to the real machine, because we'd also need to
235 copy the simulated LDT into the real one, and that's prohibitively
236 expensive.
238 Fortunately it looks like no syscalls rely on the segment regs or
239 LDT being correct, so we can get away with it. Apart from that the
240 simulation is pretty straightforward. All 6 segment registers are
241 tracked, although only %ds, %es, %fs and %gs are allowed as
242 prefixes. Perhaps it could be restricted even more than that -- I
243 am not sure what is and isn't allowed in user-mode.
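/* A minimal sketch of the translation described above, assuming a
   hypothetical array 'ldt[]' of decoded (base, limit) pairs.  It is not the
   code the instrumenter actually runs, just the idea, for an access like
   "%gs:disp(%eax)":

      UInt sel    = guest_GS;                 // segment selector in %gs
      UInt index  = sel >> 3;                 // descriptor index; (sel & 4) => LDT
      Addr virt   = guest_EAX + disp;         // virtual (segment-relative) address
      vg_assert(virt <= ldt[index].limit);    // otherwise it would fault natively
      Addr linear = ldt[index].base + virt;   // what valgrind's address checks use
*/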
246 /* Translate a struct modify_ldt_ldt_s to a VexGuestX86SegDescr, using
247 the Linux kernel's logic (cut-n-paste of code in
248 linux/kernel/ldt.c). */
250 static
251 void translate_to_hw_format ( /* IN */ vki_modify_ldt_t* inn,
252 /* OUT */ VexGuestX86SegDescr* out,
253 Int oldmode )
255 UInt entry_1, entry_2;
256 vg_assert(8 == sizeof(VexGuestX86SegDescr));
258 if (0)
259 VG_(printf)("translate_to_hw_format: base %#lx, limit %u\n",
260 inn->base_addr, inn->limit );
262 /* Allow LDTs to be cleared by the user. */
263 if (inn->base_addr == 0 && inn->limit == 0) {
264 if (oldmode ||
265 (inn->contents == 0 &&
266 inn->read_exec_only == 1 &&
267 inn->seg_32bit == 0 &&
268 inn->limit_in_pages == 0 &&
269 inn->seg_not_present == 1 &&
270 inn->useable == 0 )) {
271 entry_1 = 0;
272 entry_2 = 0;
273 goto install;
277 entry_1 = ((inn->base_addr & 0x0000ffff) << 16) |
278 (inn->limit & 0x0ffff);
279 entry_2 = (inn->base_addr & 0xff000000) |
280 ((inn->base_addr & 0x00ff0000) >> 16) |
281 (inn->limit & 0xf0000) |
282 ((inn->read_exec_only ^ 1) << 9) |
283 (inn->contents << 10) |
284 ((inn->seg_not_present ^ 1) << 15) |
285 (inn->seg_32bit << 22) |
286 (inn->limit_in_pages << 23) |
287 0x7000;
288 if (!oldmode)
289 entry_2 |= (inn->useable << 20);
291 /* Install the new entry ... */
292 install:
293 out->LdtEnt.Words.word1 = entry_1;
294 out->LdtEnt.Words.word2 = entry_2;
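/* Worked example (hedged arithmetic, following the bit layout above):
   base_addr = 0x12345678, limit = 0xABCDE, seg_32bit = 1, limit_in_pages = 1,
   contents = 0, read_exec_only = 0, seg_not_present = 0, useable = 0,
   oldmode = 0 gives

      entry_1 = (0x5678 << 16) | 0xBCDE  = 0x5678BCDE
      entry_2 = 0x12000000 | 0x34 | 0xA0000 | (1 << 9) | (1 << 15)
                | (1 << 22) | (1 << 23) | 0x7000
              = 0x12CAF234

   i.e. word1 carries the low base/limit bits and word2 the high bits plus
   the attribute flags. */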
297 /* Create initial GDT. */
298 static VexGuestX86SegDescr* alloc_system_x86_GDT ( void )
300 Int nbytes = VEX_GUEST_X86_GDT_NENT * sizeof(VexGuestX86SegDescr);
301 VexGuestX86SegDescr* gdt = VG_(calloc)("di.syswrap-x86.azxG.1", nbytes, 1);
302 vki_modify_ldt_t info;
303 UShort seg;
305 VG_(memset)(&info, 0, sizeof(info));
306 info.entry_number = 0;
307 info.base_addr = 0;
308 info.limit = 0xfffff;
309 info.seg_32bit = 1;
310 info.contents = 0;
311 info.read_exec_only = 0;
312 info.limit_in_pages = 1;
313 info.seg_not_present = 0;
314 info.useable = 0;
315 info.reserved = 0;
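/* A segment selector's low two bits are the RPL, bit 2 is the table
   indicator (0 = GDT, 1 = LDT) and bits 3..15 are the descriptor index;
   hence the (seg & 4) tests and the (seg >> 3) indexing below. */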
317 asm volatile("movw %%ds, %0" : : "m" (seg));
318 if (!(seg & 4)) translate_to_hw_format(&info, &gdt[seg >> 3], 0);
319 asm volatile("movw %%ss, %0" : : "m" (seg));
320 if (!(seg & 4)) translate_to_hw_format(&info, &gdt[seg >> 3], 0);
322 info.contents = 2;
324 asm volatile("movw %%cs, %0" : : "m" (seg));
325 if (!(seg & 4)) translate_to_hw_format(&info, &gdt[seg >> 3], 0);
327 return gdt;
330 /* Create a zeroed-out LDT. */
331 static VexGuestX86SegDescr* alloc_zeroed_x86_LDT ( void )
333 Int nbytes = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
334 return VG_(calloc)("di.syswrap-x86.azxL.1", nbytes, 1);
337 /* Free up an LDT or GDT allocated by the above fns. */
338 static void free_LDT_or_GDT ( VexGuestX86SegDescr* dt )
340 vg_assert(dt);
341 VG_(free)(dt);
344 /* Copy contents between two existing LDTs. */
345 static void copy_LDT_from_to ( VexGuestX86SegDescr* src,
346 VexGuestX86SegDescr* dst )
348 Int i;
349 vg_assert(src);
350 vg_assert(dst);
351 for (i = 0; i < VEX_GUEST_X86_LDT_NENT; i++)
352 dst[i] = src[i];
355 /* Copy contents between two existing GDTs. */
356 static void copy_GDT_from_to ( VexGuestX86SegDescr* src,
357 VexGuestX86SegDescr* dst )
359 Int i;
360 vg_assert(src);
361 vg_assert(dst);
362 for (i = 0; i < VEX_GUEST_X86_GDT_NENT; i++)
363 dst[i] = src[i];
366 /* Free this thread's DTs, if it has any. */
367 static void deallocate_LGDTs_for_thread ( VexGuestX86State* vex )
369 vg_assert(sizeof(HWord) == sizeof(void*));
371 if (0)
372 VG_(printf)("deallocate_LGDTs_for_thread: "
373 "ldt = 0x%llx, gdt = 0x%llx\n",
374 vex->guest_LDT, vex->guest_GDT );
376 if (vex->guest_LDT != (HWord)NULL) {
377 free_LDT_or_GDT( (VexGuestX86SegDescr*)(HWord)vex->guest_LDT );
378 vex->guest_LDT = (HWord)NULL;
381 if (vex->guest_GDT != (HWord)NULL) {
382 free_LDT_or_GDT( (VexGuestX86SegDescr*)(HWord)vex->guest_GDT );
383 vex->guest_GDT = (HWord)NULL;
389 * linux/kernel/ldt.c
391 * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
392 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
396 * read_ldt() is not really atomic - this is not a problem since
397 * synchronization of reads and writes done to the LDT has to be
398 * assured by user-space anyway. Writes are atomic, to protect
399 * the security checks done on new descriptors.
401 static
402 SysRes read_ldt ( ThreadId tid, UChar* ptr, UInt bytecount )
404 SysRes res;
405 UInt i, size;
406 UChar* ldt;
408 if (0)
409 VG_(printf)("read_ldt: tid = %u, ptr = %p, bytecount = %u\n",
410 tid, ptr, bytecount );
412 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
413 vg_assert(8 == sizeof(VexGuestX86SegDescr));
415 ldt = (UChar*)(HWord)(VG_(threads)[tid].arch.vex.guest_LDT);
416 res = VG_(mk_SysRes_Success)( 0 );
417 if (ldt == NULL)
418 /* LDT not allocated, meaning all entries are null */
419 goto out;
421 size = VEX_GUEST_X86_LDT_NENT * sizeof(VexGuestX86SegDescr);
422 if (size > bytecount)
423 size = bytecount;
425 res = VG_(mk_SysRes_Success)( size );
426 for (i = 0; i < size; i++)
427 ptr[i] = ldt[i];
429 out:
430 return res;
434 static
435 SysRes write_ldt ( ThreadId tid, void* ptr, UInt bytecount, Int oldmode )
437 SysRes res;
438 VexGuestX86SegDescr* ldt;
439 vki_modify_ldt_t* ldt_info;
441 if (0)
442 VG_(printf)("write_ldt: tid = %u, ptr = %p, "
443 "bytecount = %u, oldmode = %d\n",
444 tid, ptr, bytecount, oldmode );
446 vg_assert(8 == sizeof(VexGuestX86SegDescr));
447 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
449 ldt = (VexGuestX86SegDescr*)(HWord)VG_(threads)[tid].arch.vex.guest_LDT;
450 ldt_info = (vki_modify_ldt_t*)ptr;
452 res = VG_(mk_SysRes_Error)( VKI_EINVAL );
453 if (bytecount != sizeof(vki_modify_ldt_t))
454 goto out;
456 res = VG_(mk_SysRes_Error)( VKI_EINVAL );
457 if (ldt_info->entry_number >= VEX_GUEST_X86_LDT_NENT)
458 goto out;
459 if (ldt_info->contents == 3) {
460 if (oldmode)
461 goto out;
462 if (ldt_info->seg_not_present == 0)
463 goto out;
466 /* If this thread doesn't have an LDT, we'd better allocate it
467 now. */
468 if (ldt == NULL) {
469 ldt = alloc_zeroed_x86_LDT();
470 VG_(threads)[tid].arch.vex.guest_LDT = (HWord)ldt;
473 /* Install the new entry ... */
474 translate_to_hw_format ( ldt_info, &ldt[ldt_info->entry_number], oldmode );
475 res = VG_(mk_SysRes_Success)( 0 );
477 out:
478 return res;
482 static SysRes sys_modify_ldt ( ThreadId tid,
483 Int func, void* ptr, UInt bytecount )
485 /* Set return value to something "safe". I think this will never
486 actually be returned, though. */
487 SysRes ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
489 if (func != 0 && func != 1 && func != 2 && func != 0x11) {
490 ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
491 } else if (ptr != NULL && ! ML_(safe_to_deref)(ptr, bytecount)) {
492 ret = VG_(mk_SysRes_Error)( VKI_EFAULT );
493 } else {
494 switch (func) {
495 case 0:
496 ret = read_ldt(tid, ptr, bytecount);
497 break;
498 case 1:
499 ret = write_ldt(tid, ptr, bytecount, 1);
500 break;
501 case 2:
502 ret = VG_(mk_SysRes_Error)( VKI_ENOSYS );
503 VG_(unimplemented)("sys_modify_ldt: func == 2");
504 /* god knows what this is about */
505 /* ret = read_default_ldt(ptr, bytecount); */
506 /*UNREACHED*/
507 break;
508 case 0x11:
509 ret = write_ldt(tid, ptr, bytecount, 0);
510 break;
513 return ret;
517 SysRes ML_(x86_sys_set_thread_area) ( ThreadId tid, vki_modify_ldt_t* info )
519 Int idx;
520 VexGuestX86SegDescr* gdt;
522 vg_assert(8 == sizeof(VexGuestX86SegDescr));
523 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
525 if (info == NULL || ! ML_(safe_to_deref)(info, sizeof(vki_modify_ldt_t))) {
526 VG_(umsg)("Warning: bad u_info address %p in set_thread_area\n", info);
527 return VG_(mk_SysRes_Error)( VKI_EFAULT );
530 gdt = (VexGuestX86SegDescr*)(HWord)VG_(threads)[tid].arch.vex.guest_GDT;
532 /* If the thread doesn't have a GDT, allocate it now. */
533 if (!gdt) {
534 gdt = alloc_system_x86_GDT();
535 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
538 idx = info->entry_number;
540 if (idx == -1) {
541 /* Find and use the first free entry. Don't allocate entry
542 zero, because the hardware will never do that, and apparently
543 doing so confuses some code (perhaps stuff running on
544 Wine). */
545 for (idx = 1; idx < VEX_GUEST_X86_GDT_NENT; idx++) {
546 if (gdt[idx].LdtEnt.Words.word1 == 0
547 && gdt[idx].LdtEnt.Words.word2 == 0)
548 break;
551 if (idx == VEX_GUEST_X86_GDT_NENT)
552 return VG_(mk_SysRes_Error)( VKI_ESRCH );
553 } else if (idx < 0 || idx == 0 || idx >= VEX_GUEST_X86_GDT_NENT) {
554 /* Similarly, reject attempts to use GDT[0]. */
555 return VG_(mk_SysRes_Error)( VKI_EINVAL );
558 translate_to_hw_format(info, &gdt[idx], 0);
560 VG_TRACK( pre_mem_write, Vg_CoreSysCall, tid,
561 "set_thread_area(info->entry)",
562 (Addr) & info->entry_number, sizeof(unsigned int) );
563 info->entry_number = idx;
564 VG_TRACK( post_mem_write, Vg_CoreSysCall, tid,
565 (Addr) & info->entry_number, sizeof(unsigned int) );
567 return VG_(mk_SysRes_Success)( 0 );
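/* Illustrative only: a typical caller (glibc's TLS setup, roughly) does

      vki_modify_ldt_t u;
      VG_(memset)(&u, 0, sizeof(u));
      u.entry_number   = -1;            // "pick any free GDT slot for me"
      u.base_addr      = (UInt)tcb;     // hypothetical thread control block
      u.limit          = 0xfffff;
      u.seg_32bit      = 1;
      u.limit_in_pages = 1;
      // ... set_thread_area(&u) ...
      // On success u.entry_number holds the slot that was picked, and the
      // thread then loads %gs with (u.entry_number << 3) | 3.
*/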
571 static SysRes sys_get_thread_area ( ThreadId tid, vki_modify_ldt_t* info )
573 Int idx;
574 VexGuestX86SegDescr* gdt;
576 vg_assert(sizeof(HWord) == sizeof(VexGuestX86SegDescr*));
577 vg_assert(8 == sizeof(VexGuestX86SegDescr));
579 if (info == NULL || ! ML_(safe_to_deref)(info, sizeof(vki_modify_ldt_t))) {
580 VG_(umsg)("Warning: bad u_info address %p in get_thread_area\n", info);
581 return VG_(mk_SysRes_Error)( VKI_EFAULT );
584 idx = info->entry_number;
586 if (idx < 0 || idx >= VEX_GUEST_X86_GDT_NENT)
587 return VG_(mk_SysRes_Error)( VKI_EINVAL );
589 gdt = (VexGuestX86SegDescr*)(HWord)VG_(threads)[tid].arch.vex.guest_GDT;
591 /* If the thread doesn't have a GDT, allocate it now. */
592 if (!gdt) {
593 gdt = alloc_system_x86_GDT();
594 VG_(threads)[tid].arch.vex.guest_GDT = (HWord)gdt;
597 info->base_addr = ( gdt[idx].LdtEnt.Bits.BaseHi << 24 ) |
598 ( gdt[idx].LdtEnt.Bits.BaseMid << 16 ) |
599 gdt[idx].LdtEnt.Bits.BaseLow;
600 info->limit = ( gdt[idx].LdtEnt.Bits.LimitHi << 16 ) |
601 gdt[idx].LdtEnt.Bits.LimitLow;
602 info->seg_32bit = gdt[idx].LdtEnt.Bits.Default_Big;
603 info->contents = ( gdt[idx].LdtEnt.Bits.Type >> 2 ) & 0x3;
604 info->read_exec_only = ( gdt[idx].LdtEnt.Bits.Type & 0x1 ) ^ 0x1;
605 info->limit_in_pages = gdt[idx].LdtEnt.Bits.Granularity;
606 info->seg_not_present = gdt[idx].LdtEnt.Bits.Pres ^ 0x1;
607 info->useable = gdt[idx].LdtEnt.Bits.Sys;
608 info->reserved = 0;
610 return VG_(mk_SysRes_Success)( 0 );
613 /* ---------------------------------------------------------------------
614 More thread stuff
615 ------------------------------------------------------------------ */
617 void VG_(cleanup_thread) ( ThreadArchState* arch )
619 /* Release arch-specific resources held by this thread. */
620 /* On x86, we have to dump the LDT and GDT. */
621 deallocate_LGDTs_for_thread( &arch->vex );
625 void ML_(x86_setup_LDT_GDT) ( /*OUT*/ ThreadArchState *child,
626 /*IN*/ ThreadArchState *parent )
628 /* We inherit our parent's LDT. */
629 if (parent->vex.guest_LDT == (HWord)NULL) {
630 /* We hope this is the common case. */
631 child->vex.guest_LDT = (HWord)NULL;
632 } else {
633 /* No luck .. we have to take a copy of the parent's. */
634 child->vex.guest_LDT = (HWord)alloc_zeroed_x86_LDT();
635 copy_LDT_from_to( (VexGuestX86SegDescr*)(HWord)parent->vex.guest_LDT,
636 (VexGuestX86SegDescr*)(HWord)child->vex.guest_LDT );
639 /* Either we start with an empty GDT (the usual case) or inherit a
640 copy of our parent's one (Quadrics Elan3 driver-style clone
641 only). */
642 child->vex.guest_GDT = (HWord)NULL;
644 if (parent->vex.guest_GDT != (HWord)NULL) {
645 child->vex.guest_GDT = (HWord)alloc_system_x86_GDT();
646 copy_GDT_from_to( (VexGuestX86SegDescr*)(HWord)parent->vex.guest_GDT,
647 (VexGuestX86SegDescr*)(HWord)child->vex.guest_GDT );
652 /* ---------------------------------------------------------------------
653 PRE/POST wrappers for x86/Linux-specific syscalls
654 ------------------------------------------------------------------ */
656 #define PRE(name) DEFN_PRE_TEMPLATE(x86_linux, name)
657 #define POST(name) DEFN_POST_TEMPLATE(x86_linux, name)
659 /* Add prototypes for the wrappers declared here, so that gcc doesn't
660 harass us for not having prototypes. Really this is a kludge --
661 the right thing to do is to make these wrappers 'static' since they
662 aren't visible outside this file, but that requires even more macro
663 magic. */
664 DECL_TEMPLATE(x86_linux, sys_stat64);
665 DECL_TEMPLATE(x86_linux, sys_fstatat64);
666 DECL_TEMPLATE(x86_linux, sys_fstat64);
667 DECL_TEMPLATE(x86_linux, sys_lstat64);
668 DECL_TEMPLATE(x86_linux, old_mmap);
669 DECL_TEMPLATE(x86_linux, sys_mmap2);
670 DECL_TEMPLATE(x86_linux, sys_sigreturn);
671 DECL_TEMPLATE(x86_linux, sys_rt_sigreturn);
672 DECL_TEMPLATE(x86_linux, sys_modify_ldt);
673 DECL_TEMPLATE(x86_linux, sys_set_thread_area);
674 DECL_TEMPLATE(x86_linux, sys_get_thread_area);
675 DECL_TEMPLATE(x86_linux, sys_ptrace);
676 DECL_TEMPLATE(x86_linux, sys_sigsuspend);
677 DECL_TEMPLATE(x86_linux, old_select);
678 DECL_TEMPLATE(x86_linux, sys_vm86old);
679 DECL_TEMPLATE(x86_linux, sys_vm86);
680 DECL_TEMPLATE(x86_linux, sys_syscall223);
682 PRE(old_select)
684 /* struct sel_arg_struct {
685 unsigned long n;
686 fd_set *inp, *outp, *exp;
687 struct timeval *tvp;
690 PRE_REG_READ1(long, "old_select", struct sel_arg_struct *, args);
691 PRE_MEM_READ( "old_select(args)", ARG1, 5*sizeof(UWord) );
692 *flags |= SfMayBlock;
694 UInt* arg_struct = (UInt*)ARG1;
695 UInt a1, a2, a3, a4, a5;
697 a1 = arg_struct[0];
698 a2 = arg_struct[1];
699 a3 = arg_struct[2];
700 a4 = arg_struct[3];
701 a5 = arg_struct[4];
703 PRINT("old_select ( %d, %#x, %#x, %#x, %#x )", (Int)a1,a2,a3,a4,a5);
704 if (a2 != (Addr)NULL)
705 PRE_MEM_READ( "old_select(readfds)", a2, a1/8 /* __FD_SETSIZE/8 */ );
706 if (a3 != (Addr)NULL)
707 PRE_MEM_READ( "old_select(writefds)", a3, a1/8 /* __FD_SETSIZE/8 */ );
708 if (a4 != (Addr)NULL)
709 PRE_MEM_READ( "old_select(exceptfds)", a4, a1/8 /* __FD_SETSIZE/8 */ );
710 if (a5 != (Addr)NULL)
711 PRE_MEM_READ( "old_select(timeout)", a5, sizeof(struct vki_timeval) );
715 PRE(sys_sigreturn)
717 /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
718 an explanation of what follows. */
720 ThreadState* tst;
721 PRINT("sys_sigreturn ( )");
723 vg_assert(VG_(is_valid_tid)(tid));
724 vg_assert(tid >= 1 && tid < VG_N_THREADS);
725 vg_assert(VG_(is_running_thread)(tid));
727 /* Adjust esp to point to start of frame; skip back up over
728 sigreturn sequence's "popl %eax" and handler ret addr */
729 tst = VG_(get_ThreadState)(tid);
730 tst->arch.vex.guest_ESP -= sizeof(Addr)+sizeof(Word);
731 /* XXX why does ESP change differ from rt_sigreturn case below? */
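/* (Presumably because the classic sigreturn trampoline is
      popl %eax ; movl $__NR_sigreturn, %eax ; int $0x80
   so by the time we get here both the handler's return address and the
   popped signal number -- 4 + 4 bytes -- have been consumed, whereas the
   rt_sigreturn trampoline does no popl, so only the return address needs
   undoing.) */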
733 /* This is only so that the EIP is (might be) useful to report if
734 something goes wrong in the sigreturn */
735 ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
737 /* Restore register state from frame and remove it */
738 VG_(sigframe_destroy)(tid, False);
740 /* Tell the driver not to update the guest state with the "result",
741 and set a bogus result to keep it happy. */
742 *flags |= SfNoWriteResult;
743 SET_STATUS_Success(0);
745 /* Check to see if any signals arose as a result of this. */
746 *flags |= SfPollAfter;
749 PRE(sys_rt_sigreturn)
751 /* See comments on PRE(sys_rt_sigreturn) in syswrap-amd64-linux.c for
752 an explanation of what follows. */
754 ThreadState* tst;
755 PRINT("sys_rt_sigreturn ( )");
757 vg_assert(VG_(is_valid_tid)(tid));
758 vg_assert(tid >= 1 && tid < VG_N_THREADS);
759 vg_assert(VG_(is_running_thread)(tid));
761 /* Adjust esp to point to start of frame; skip back up over handler
762 ret addr */
763 tst = VG_(get_ThreadState)(tid);
764 tst->arch.vex.guest_ESP -= sizeof(Addr);
765 /* XXX why does ESP change differ from sigreturn case above? */
767 /* This is only so that the EIP is (might be) useful to report if
768 something goes wrong in the sigreturn */
769 ML_(fixup_guest_state_to_restart_syscall)(&tst->arch);
771 /* Restore register state from frame and remove it */
772 VG_(sigframe_destroy)(tid, True);
774 /* Tell the driver not to update the guest state with the "result",
775 and set a bogus result to keep it happy. */
776 *flags |= SfNoWriteResult;
777 SET_STATUS_Success(0);
779 /* Check to see if any signals arose as a result of this. */
780 *flags |= SfPollAfter;
783 PRE(sys_modify_ldt)
785 PRINT("sys_modify_ldt ( %ld, %#lx, %lu )", SARG1, ARG2, ARG3);
786 PRE_REG_READ3(int, "modify_ldt", int, func, void *, ptr,
787 unsigned long, bytecount);
789 if (ARG1 == 0) {
790 /* read the LDT into ptr */
791 PRE_MEM_WRITE( "modify_ldt(ptr)", ARG2, ARG3 );
793 if (ARG1 == 1 || ARG1 == 0x11) {
794 /* write the LDT with the entry pointed at by ptr */
795 PRE_MEM_READ( "modify_ldt(ptr)", ARG2, sizeof(vki_modify_ldt_t) );
797 /* "do" the syscall ourselves; the kernel never sees it */
798 SET_STATUS_from_SysRes( sys_modify_ldt( tid, ARG1, (void*)ARG2, ARG3 ) );
800 if (ARG1 == 0 && SUCCESS && RES > 0) {
801 POST_MEM_WRITE( ARG2, RES );
805 PRE(sys_set_thread_area)
807 PRINT("sys_set_thread_area ( %#lx )", ARG1);
808 PRE_REG_READ1(int, "set_thread_area", struct user_desc *, u_info)
809 PRE_MEM_READ( "set_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );
811 /* "do" the syscall ourselves; the kernel never sees it */
812 SET_STATUS_from_SysRes( ML_(x86_sys_set_thread_area)( tid, (void *)ARG1 ) );
815 PRE(sys_get_thread_area)
817 PRINT("sys_get_thread_area ( %#lx )", ARG1);
818 PRE_REG_READ1(int, "get_thread_area", struct user_desc *, u_info)
819 PRE_MEM_WRITE( "get_thread_area(u_info)", ARG1, sizeof(vki_modify_ldt_t) );
821 /* "do" the syscall ourselves; the kernel never sees it */
822 SET_STATUS_from_SysRes( sys_get_thread_area( tid, (void *)ARG1 ) );
824 if (SUCCESS) {
825 POST_MEM_WRITE( ARG1, sizeof(vki_modify_ldt_t) );
829 // Parts of this are x86-specific, but the *PEEK* cases are generic.
831 // ARG3 is only used for pointers into the traced process's address
832 // space and for offsets into the traced process's struct
833 // user_regs_struct. It is never a pointer into this process's memory
834 // space, and we should therefore not check anything it points to.
835 PRE(sys_ptrace)
837 PRINT("sys_ptrace ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
838 PRE_REG_READ4(int, "ptrace",
839 long, request, long, pid, unsigned long, addr,
840 unsigned long, data);
841 switch (ARG1) {
842 case VKI_PTRACE_PEEKTEXT:
843 case VKI_PTRACE_PEEKDATA:
844 case VKI_PTRACE_PEEKUSR:
845 PRE_MEM_WRITE( "ptrace(peek)", ARG4,
846 sizeof (long));
847 break;
848 case VKI_PTRACE_GETREGS:
849 PRE_MEM_WRITE( "ptrace(getregs)", ARG4,
850 sizeof (struct vki_user_regs_struct));
851 break;
852 case VKI_PTRACE_GETFPREGS:
853 PRE_MEM_WRITE( "ptrace(getfpregs)", ARG4,
854 sizeof (struct vki_user_i387_struct));
855 break;
856 case VKI_PTRACE_GETFPXREGS:
857 PRE_MEM_WRITE( "ptrace(getfpxregs)", ARG4,
858 sizeof(struct vki_user_fxsr_struct) );
859 break;
860 case VKI_PTRACE_GET_THREAD_AREA:
861 PRE_MEM_WRITE( "ptrace(get_thread_area)", ARG4,
862 sizeof(struct vki_user_desc) );
863 break;
864 case VKI_PTRACE_SETREGS:
865 PRE_MEM_READ( "ptrace(setregs)", ARG4,
866 sizeof (struct vki_user_regs_struct));
867 break;
868 case VKI_PTRACE_SETFPREGS:
869 PRE_MEM_READ( "ptrace(setfpregs)", ARG4,
870 sizeof (struct vki_user_i387_struct));
871 break;
872 case VKI_PTRACE_SETFPXREGS:
873 PRE_MEM_READ( "ptrace(setfpxregs)", ARG4,
874 sizeof(struct vki_user_fxsr_struct) );
875 break;
876 case VKI_PTRACE_SET_THREAD_AREA:
877 PRE_MEM_READ( "ptrace(set_thread_area)", ARG4,
878 sizeof(struct vki_user_desc) );
879 break;
880 case VKI_PTRACE_GETEVENTMSG:
881 PRE_MEM_WRITE( "ptrace(geteventmsg)", ARG4, sizeof(unsigned long));
882 break;
883 case VKI_PTRACE_GETSIGINFO:
884 PRE_MEM_WRITE( "ptrace(getsiginfo)", ARG4, sizeof(vki_siginfo_t));
885 break;
886 case VKI_PTRACE_SETSIGINFO:
887 PRE_MEM_READ( "ptrace(setsiginfo)", ARG4, sizeof(vki_siginfo_t));
888 break;
889 case VKI_PTRACE_GETREGSET:
890 ML_(linux_PRE_getregset)(tid, ARG3, ARG4);
891 break;
892 case VKI_PTRACE_SETREGSET:
893 ML_(linux_PRE_setregset)(tid, ARG3, ARG4);
894 break;
895 default:
896 break;
900 POST(sys_ptrace)
902 switch (ARG1) {
903 case VKI_PTRACE_TRACEME:
904 ML_(linux_POST_traceme)(tid);
905 break;
906 case VKI_PTRACE_PEEKTEXT:
907 case VKI_PTRACE_PEEKDATA:
908 case VKI_PTRACE_PEEKUSR:
909 POST_MEM_WRITE( ARG4, sizeof (long));
910 break;
911 case VKI_PTRACE_GETREGS:
912 POST_MEM_WRITE( ARG4, sizeof (struct vki_user_regs_struct));
913 break;
914 case VKI_PTRACE_GETFPREGS:
915 POST_MEM_WRITE( ARG4, sizeof (struct vki_user_i387_struct));
916 break;
917 case VKI_PTRACE_GETFPXREGS:
918 POST_MEM_WRITE( ARG4, sizeof(struct vki_user_fxsr_struct) );
919 break;
920 case VKI_PTRACE_GET_THREAD_AREA:
921 POST_MEM_WRITE( ARG4, sizeof(struct vki_user_desc) );
922 break;
923 case VKI_PTRACE_GETEVENTMSG:
924 POST_MEM_WRITE( ARG4, sizeof(unsigned long));
925 break;
926 case VKI_PTRACE_GETSIGINFO:
927 /* XXX: This is a simplification. Different parts of the
928 * siginfo_t are valid depending on the type of signal.
930 POST_MEM_WRITE( ARG4, sizeof(vki_siginfo_t));
931 break;
932 case VKI_PTRACE_GETREGSET:
933 ML_(linux_POST_getregset)(tid, ARG3, ARG4);
934 break;
935 default:
936 break;
940 PRE(old_mmap)
942 /* struct mmap_arg_struct {
943 unsigned long addr;
944 unsigned long len;
945 unsigned long prot;
946 unsigned long flags;
947 unsigned long fd;
948 unsigned long offset;
949 }; */
950 UWord a1, a2, a3, a4, a5, a6;
951 SysRes r;
953 UWord* args = (UWord*)ARG1;
954 PRE_REG_READ1(long, "old_mmap", struct mmap_arg_struct *, args);
955 PRE_MEM_READ( "old_mmap(args)", (Addr)args, 6*sizeof(UWord) );
957 a1 = args[1-1];
958 a2 = args[2-1];
959 a3 = args[3-1];
960 a4 = args[4-1];
961 a5 = args[5-1];
962 a6 = args[6-1];
964 PRINT("old_mmap ( %#lx, %lu, %ld, %ld, %ld, %ld )",
965 a1, a2, (Word)a3, (Word)a4, (Word)a5, (Word)a6 );
967 r = ML_(generic_PRE_sys_mmap)( tid, a1, a2, a3, a4, a5, (Off64T)a6 );
968 SET_STATUS_from_SysRes(r);
971 PRE(sys_mmap2)
973 SysRes r;
975 // Exactly like old_mmap() except:
976 // - all 6 args are passed in regs, rather than in a memory-block.
977 // - the file offset is specified in pagesize units rather than bytes,
978 // so that it can be used for files bigger than 2^32 bytes.
979 // pagesize or 4K-size units in offset? For ppc32/64-linux, this is
980 // 4K-sized. Assert that the page size is 4K here for safety.
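// Example: with 4K pages, ARG6 == 0x123 denotes a byte offset of
// 0x123 * 4096 = 0x123000, which is what gets passed to the generic
// mmap handling below.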
981 vg_assert(VKI_PAGE_SIZE == 4096);
982 PRINT("sys_mmap2 ( %#lx, %lu, %lu, %lu, %lu, %lu )",
983 ARG1, ARG2, ARG3, ARG4, ARG5, ARG6 );
984 PRE_REG_READ6(long, "mmap2",
985 unsigned long, start, unsigned long, length,
986 unsigned long, prot, unsigned long, flags,
987 unsigned long, fd, unsigned long, offset);
989 r = ML_(generic_PRE_sys_mmap)( tid, ARG1, ARG2, ARG3, ARG4, ARG5,
990 4096 * (Off64T)ARG6 );
991 SET_STATUS_from_SysRes(r);
994 // XXX: lstat64/fstat64/stat64 are generic, but not necessarily
995 // applicable to every architecture -- I think only to 32-bit archs.
996 // We're going to need something like linux/core_os32.h for such
997 // things, eventually, I think. --njn
998 PRE(sys_lstat64)
1000 PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
1001 PRE_REG_READ2(long, "lstat64", char *, file_name, struct stat64 *, buf);
1002 PRE_MEM_RASCIIZ( "lstat64(file_name)", ARG1 );
1003 PRE_MEM_WRITE( "lstat64(buf)", ARG2, sizeof(struct vki_stat64) );
1006 POST(sys_lstat64)
1008 vg_assert(SUCCESS);
1009 if (RES == 0) {
1010 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1014 PRE(sys_stat64)
1016 FUSE_COMPATIBLE_MAY_BLOCK();
1017 PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (HChar*)ARG1, ARG2);
1018 PRE_REG_READ2(long, "stat64", char *, file_name, struct stat64 *, buf);
1019 PRE_MEM_RASCIIZ( "stat64(file_name)", ARG1 );
1020 PRE_MEM_WRITE( "stat64(buf)", ARG2, sizeof(struct vki_stat64) );
1023 POST(sys_stat64)
1025 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1028 PRE(sys_fstatat64)
1030 FUSE_COMPATIBLE_MAY_BLOCK();
1031 // ARG4 = int flags; Flags are or'ed together, therefore writing them
1032 // as a hex constant is more meaningful.
1033 PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %#lx )",
1034 SARG1, ARG2, (HChar*)ARG2, ARG3, ARG4);
1035 PRE_REG_READ4(long, "fstatat64",
1036 int, dfd, char *, file_name, struct stat64 *, buf, int, flags);
1037 PRE_MEM_RASCIIZ( "fstatat64(file_name)", ARG2 );
1038 PRE_MEM_WRITE( "fstatat64(buf)", ARG3, sizeof(struct vki_stat64) );
1041 POST(sys_fstatat64)
1043 POST_MEM_WRITE( ARG3, sizeof(struct vki_stat64) );
1046 PRE(sys_fstat64)
1048 PRINT("sys_fstat64 ( %lu, %#lx )", ARG1, ARG2);
1049 PRE_REG_READ2(long, "fstat64", unsigned long, fd, struct stat64 *, buf);
1050 PRE_MEM_WRITE( "fstat64(buf)", ARG2, sizeof(struct vki_stat64) );
1053 POST(sys_fstat64)
1055 POST_MEM_WRITE( ARG2, sizeof(struct vki_stat64) );
1058 /* NB: arm-linux has a clone of this one, and ppc32-linux has an almost
1059 identical version. */
1060 PRE(sys_sigsuspend)
1062 /* The C library interface to sigsuspend just takes a pointer to
1063 a signal mask but this system call has three arguments - the first
1064 two don't appear to be used by the kernel and are always passed as
1065 zero by glibc and the third is the first word of the signal mask
1066 so only 32 signals are supported.
1068 In fact glibc normally uses rt_sigsuspend if it is available as
1069 that takes a pointer to the signal mask so supports more signals.
1071 *flags |= SfMayBlock;
1072 PRINT("sys_sigsuspend ( %ld, %ld, %lu )", SARG1, SARG2, ARG3 );
1073 PRE_REG_READ3(int, "sigsuspend",
1074 int, history0, int, history1,
1075 vki_old_sigset_t, mask);
1078 PRE(sys_vm86old)
1080 PRINT("sys_vm86old ( %#lx )", ARG1);
1081 PRE_REG_READ1(int, "vm86old", struct vm86_struct *, info);
1082 PRE_MEM_WRITE( "vm86old(info)", ARG1, sizeof(struct vki_vm86_struct));
1085 POST(sys_vm86old)
1087 POST_MEM_WRITE( ARG1, sizeof(struct vki_vm86_struct));
1090 PRE(sys_vm86)
1092 PRINT("sys_vm86 ( %lu, %#lx )", ARG1, ARG2);
1093 PRE_REG_READ2(int, "vm86", unsigned long, fn, struct vm86plus_struct *, v86);
1094 if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
1095 PRE_MEM_WRITE( "vm86(v86)", ARG2, sizeof(struct vki_vm86plus_struct));
1098 POST(sys_vm86)
1100 if (ARG1 == VKI_VM86_ENTER || ARG1 == VKI_VM86_ENTER_NO_BYPASS)
1101 POST_MEM_WRITE( ARG2, sizeof(struct vki_vm86plus_struct));
1105 /* ---------------------------------------------------------------
1106 PRE/POST wrappers for x86/Linux-variant specific syscalls
1107 ------------------------------------------------------------ */
1109 PRE(sys_syscall223)
1111 Int err;
1113 /* 223 is used by sys_bproc. If we're not on a declared bproc
1114 variant, fail in the usual way. */
1116 if (!KernelVariantiS(KernelVariant_bproc, VG_(clo_kernel_variant))) {
1117 PRINT("non-existent syscall! (syscall 223)");
1118 PRE_REG_READ0(long, "ni_syscall(223)");
1119 SET_STATUS_Failure( VKI_ENOSYS );
1120 return;
1123 err = ML_(linux_variant_PRE_sys_bproc)( ARG1, ARG2, ARG3,
1124 ARG4, ARG5, ARG6 );
1125 if (err) {
1126 SET_STATUS_Failure( err );
1127 return;
1129 /* Let it go through. */
1130 *flags |= SfMayBlock; /* who knows? play safe. */
1133 POST(sys_syscall223)
1135 ML_(linux_variant_POST_sys_bproc)( ARG1, ARG2, ARG3,
1136 ARG4, ARG5, ARG6 );
1139 #undef PRE
1140 #undef POST
1143 /* ---------------------------------------------------------------------
1144 The x86/Linux syscall table
1145 ------------------------------------------------------------------ */
1147 /* Add an x86-linux specific wrapper to a syscall table. */
1148 #define PLAX_(sysno, name) WRAPPER_ENTRY_X_(x86_linux, sysno, name)
1149 #define PLAXY(sysno, name) WRAPPER_ENTRY_XY(x86_linux, sysno, name)
1152 // This table maps from __NR_xxx syscall numbers (from
1153 // linux/include/asm-i386/unistd.h) to the appropriate PRE/POST sys_foo()
1154 // wrappers on x86 (as per sys_call_table in linux/arch/i386/kernel/entry.S).
1156 // For those syscalls not handled by Valgrind, the annotation indicates the
1157 // arch/OS combination, eg. */* (generic), */Linux (Linux only), ?/?
1158 // (unknown).
1160 static SyscallTableEntry syscall_table[] = {
1161 //zz // (restart_syscall) // 0
1162 GENX_(__NR_exit, sys_exit), // 1
1163 GENX_(__NR_fork, sys_fork), // 2
1164 GENXY(__NR_read, sys_read), // 3
1165 GENX_(__NR_write, sys_write), // 4
1167 GENXY(__NR_open, sys_open), // 5
1168 GENXY(__NR_close, sys_close), // 6
1169 GENXY(__NR_waitpid, sys_waitpid), // 7
1170 GENXY(__NR_creat, sys_creat), // 8
1171 GENX_(__NR_link, sys_link), // 9
1173 GENX_(__NR_unlink, sys_unlink), // 10
1174 GENX_(__NR_execve, sys_execve), // 11
1175 GENX_(__NR_chdir, sys_chdir), // 12
1176 GENXY(__NR_time, sys_time), // 13
1177 GENX_(__NR_mknod, sys_mknod), // 14
1179 GENX_(__NR_chmod, sys_chmod), // 15
1180 //zz LINX_(__NR_lchown, sys_lchown16), // 16
1181 GENX_(__NR_break, sys_ni_syscall), // 17
1182 //zz // (__NR_oldstat, sys_stat), // 18 (obsolete)
1183 LINX_(__NR_lseek, sys_lseek), // 19
1185 GENX_(__NR_getpid, sys_getpid), // 20
1186 LINX_(__NR_mount, sys_mount), // 21
1187 LINX_(__NR_umount, sys_oldumount), // 22
1188 LINX_(__NR_setuid, sys_setuid16), // 23 ## P
1189 LINX_(__NR_getuid, sys_getuid16), // 24 ## P
1191 LINX_(__NR_stime, sys_stime), // 25 * (SVr4,SVID,X/OPEN)
1192 PLAXY(__NR_ptrace, sys_ptrace), // 26
1193 GENX_(__NR_alarm, sys_alarm), // 27
1194 //zz // (__NR_oldfstat, sys_fstat), // 28 * L -- obsolete
1195 GENX_(__NR_pause, sys_pause), // 29
1197 LINX_(__NR_utime, sys_utime), // 30
1198 GENX_(__NR_stty, sys_ni_syscall), // 31
1199 GENX_(__NR_gtty, sys_ni_syscall), // 32
1200 GENX_(__NR_access, sys_access), // 33
1201 GENX_(__NR_nice, sys_nice), // 34
1203 GENX_(__NR_ftime, sys_ni_syscall), // 35
1204 GENX_(__NR_sync, sys_sync), // 36
1205 GENX_(__NR_kill, sys_kill), // 37
1206 GENX_(__NR_rename, sys_rename), // 38
1207 GENX_(__NR_mkdir, sys_mkdir), // 39
1209 GENX_(__NR_rmdir, sys_rmdir), // 40
1210 GENXY(__NR_dup, sys_dup), // 41
1211 LINXY(__NR_pipe, sys_pipe), // 42
1212 GENXY(__NR_times, sys_times), // 43
1213 GENX_(__NR_prof, sys_ni_syscall), // 44
1214 //zz
1215 GENX_(__NR_brk, sys_brk), // 45
1216 LINX_(__NR_setgid, sys_setgid16), // 46
1217 LINX_(__NR_getgid, sys_getgid16), // 47
1218 //zz // (__NR_signal, sys_signal), // 48 */* (ANSI C)
1219 LINX_(__NR_geteuid, sys_geteuid16), // 49
1221 LINX_(__NR_getegid, sys_getegid16), // 50
1222 GENX_(__NR_acct, sys_acct), // 51
1223 LINX_(__NR_umount2, sys_umount), // 52
1224 GENX_(__NR_lock, sys_ni_syscall), // 53
1225 LINXY(__NR_ioctl, sys_ioctl), // 54
1227 LINXY(__NR_fcntl, sys_fcntl), // 55
1228 GENX_(__NR_mpx, sys_ni_syscall), // 56
1229 GENX_(__NR_setpgid, sys_setpgid), // 57
1230 GENX_(__NR_ulimit, sys_ni_syscall), // 58
1231 //zz // (__NR_oldolduname, sys_olduname), // 59 Linux -- obsolete
1232 //zz
1233 GENX_(__NR_umask, sys_umask), // 60
1234 GENX_(__NR_chroot, sys_chroot), // 61
1235 //zz // (__NR_ustat, sys_ustat) // 62 SVr4 -- deprecated
1236 GENXY(__NR_dup2, sys_dup2), // 63
1237 GENX_(__NR_getppid, sys_getppid), // 64
1239 GENX_(__NR_getpgrp, sys_getpgrp), // 65
1240 GENX_(__NR_setsid, sys_setsid), // 66
1241 LINXY(__NR_sigaction, sys_sigaction), // 67
1242 //zz // (__NR_sgetmask, sys_sgetmask), // 68 */* (ANSI C)
1243 //zz // (__NR_ssetmask, sys_ssetmask), // 69 */* (ANSI C)
1244 //zz
1245 LINX_(__NR_setreuid, sys_setreuid16), // 70
1246 LINX_(__NR_setregid, sys_setregid16), // 71
1247 PLAX_(__NR_sigsuspend, sys_sigsuspend), // 72
1248 LINXY(__NR_sigpending, sys_sigpending), // 73
1249 GENX_(__NR_sethostname, sys_sethostname), // 74
1250 //zz
1251 GENX_(__NR_setrlimit, sys_setrlimit), // 75
1252 GENXY(__NR_getrlimit, sys_old_getrlimit), // 76
1253 GENXY(__NR_getrusage, sys_getrusage), // 77
1254 GENXY(__NR_gettimeofday, sys_gettimeofday), // 78
1255 GENX_(__NR_settimeofday, sys_settimeofday), // 79
1257 LINXY(__NR_getgroups, sys_getgroups16), // 80
1258 LINX_(__NR_setgroups, sys_setgroups16), // 81
1259 PLAX_(__NR_select, old_select), // 82
1260 GENX_(__NR_symlink, sys_symlink), // 83
1261 //zz // (__NR_oldlstat, sys_lstat), // 84 -- obsolete
1262 //zz
1263 GENX_(__NR_readlink, sys_readlink), // 85
1264 //zz // (__NR_uselib, sys_uselib), // 86 */Linux
1265 //zz // (__NR_swapon, sys_swapon), // 87 */Linux
1266 //zz // (__NR_reboot, sys_reboot), // 88 */Linux
1267 //zz // (__NR_readdir, old_readdir), // 89 -- superseded
1268 //zz
1269 PLAX_(__NR_mmap, old_mmap), // 90
1270 GENXY(__NR_munmap, sys_munmap), // 91
1271 GENX_(__NR_truncate, sys_truncate), // 92
1272 GENX_(__NR_ftruncate, sys_ftruncate), // 93
1273 GENX_(__NR_fchmod, sys_fchmod), // 94
1275 LINX_(__NR_fchown, sys_fchown16), // 95
1276 GENX_(__NR_getpriority, sys_getpriority), // 96
1277 GENX_(__NR_setpriority, sys_setpriority), // 97
1278 GENX_(__NR_profil, sys_ni_syscall), // 98
1279 GENXY(__NR_statfs, sys_statfs), // 99
1281 GENXY(__NR_fstatfs, sys_fstatfs), // 100
1282 LINX_(__NR_ioperm, sys_ioperm), // 101
1283 LINXY(__NR_socketcall, sys_socketcall), // 102 x86/Linux-only
1284 LINXY(__NR_syslog, sys_syslog), // 103
1285 GENXY(__NR_setitimer, sys_setitimer), // 104
1287 GENXY(__NR_getitimer, sys_getitimer), // 105
1288 GENXY(__NR_stat, sys_newstat), // 106
1289 GENXY(__NR_lstat, sys_newlstat), // 107
1290 GENXY(__NR_fstat, sys_newfstat), // 108
1291 //zz // (__NR_olduname, sys_uname), // 109 -- obsolete
1292 //zz
1293 GENX_(__NR_iopl, sys_iopl), // 110
1294 LINX_(__NR_vhangup, sys_vhangup), // 111
1295 GENX_(__NR_idle, sys_ni_syscall), // 112
1296 PLAXY(__NR_vm86old, sys_vm86old), // 113 x86/Linux-only
1297 GENXY(__NR_wait4, sys_wait4), // 114
1298 //zz
1299 //zz // (__NR_swapoff, sys_swapoff), // 115 */Linux
1300 LINXY(__NR_sysinfo, sys_sysinfo), // 116
1301 LINXY(__NR_ipc, sys_ipc), // 117
1302 GENX_(__NR_fsync, sys_fsync), // 118
1303 PLAX_(__NR_sigreturn, sys_sigreturn), // 119 ?/Linux
1305 LINX_(__NR_clone, sys_clone), // 120
1306 //zz // (__NR_setdomainname, sys_setdomainname), // 121 */*(?)
1307 GENXY(__NR_uname, sys_newuname), // 122
1308 PLAX_(__NR_modify_ldt, sys_modify_ldt), // 123
1309 LINXY(__NR_adjtimex, sys_adjtimex), // 124
1311 GENXY(__NR_mprotect, sys_mprotect), // 125
1312 LINXY(__NR_sigprocmask, sys_sigprocmask), // 126
1313 //zz // Nb: create_module() was removed 2.4-->2.6
1314 GENX_(__NR_create_module, sys_ni_syscall), // 127
1315 LINX_(__NR_init_module, sys_init_module), // 128
1316 LINX_(__NR_delete_module, sys_delete_module), // 129
1317 //zz
1318 //zz // Nb: get_kernel_syms() was removed 2.4-->2.6
1319 GENX_(__NR_get_kernel_syms, sys_ni_syscall), // 130
1320 LINX_(__NR_quotactl, sys_quotactl), // 131
1321 GENX_(__NR_getpgid, sys_getpgid), // 132
1322 GENX_(__NR_fchdir, sys_fchdir), // 133
1323 //zz // (__NR_bdflush, sys_bdflush), // 134 */Linux
1324 //zz
1325 //zz // (__NR_sysfs, sys_sysfs), // 135 SVr4
1326 LINX_(__NR_personality, sys_personality), // 136
1327 GENX_(__NR_afs_syscall, sys_ni_syscall), // 137
1328 LINX_(__NR_setfsuid, sys_setfsuid16), // 138
1329 LINX_(__NR_setfsgid, sys_setfsgid16), // 139
1331 LINXY(__NR__llseek, sys_llseek), // 140
1332 GENXY(__NR_getdents, sys_getdents), // 141
1333 GENX_(__NR__newselect, sys_select), // 142
1334 GENX_(__NR_flock, sys_flock), // 143
1335 GENX_(__NR_msync, sys_msync), // 144
1337 GENXY(__NR_readv, sys_readv), // 145
1338 GENX_(__NR_writev, sys_writev), // 146
1339 GENX_(__NR_getsid, sys_getsid), // 147
1340 GENX_(__NR_fdatasync, sys_fdatasync), // 148
1341 LINXY(__NR__sysctl, sys_sysctl), // 149
1343 GENX_(__NR_mlock, sys_mlock), // 150
1344 GENX_(__NR_munlock, sys_munlock), // 151
1345 GENX_(__NR_mlockall, sys_mlockall), // 152
1346 LINX_(__NR_munlockall, sys_munlockall), // 153
1347 LINXY(__NR_sched_setparam, sys_sched_setparam), // 154
1349 LINXY(__NR_sched_getparam, sys_sched_getparam), // 155
1350 LINX_(__NR_sched_setscheduler, sys_sched_setscheduler), // 156
1351 LINX_(__NR_sched_getscheduler, sys_sched_getscheduler), // 157
1352 LINX_(__NR_sched_yield, sys_sched_yield), // 158
1353 LINX_(__NR_sched_get_priority_max, sys_sched_get_priority_max),// 159
1355 LINX_(__NR_sched_get_priority_min, sys_sched_get_priority_min),// 160
1356 LINXY(__NR_sched_rr_get_interval, sys_sched_rr_get_interval), // 161
1357 GENXY(__NR_nanosleep, sys_nanosleep), // 162
1358 GENX_(__NR_mremap, sys_mremap), // 163
1359 LINX_(__NR_setresuid, sys_setresuid16), // 164
1361 LINXY(__NR_getresuid, sys_getresuid16), // 165
1362 PLAXY(__NR_vm86, sys_vm86), // 166 x86/Linux-only
1363 GENX_(__NR_query_module, sys_ni_syscall), // 167
1364 GENXY(__NR_poll, sys_poll), // 168
1365 //zz // (__NR_nfsservctl, sys_nfsservctl), // 169 */Linux
1366 //zz
1367 LINX_(__NR_setresgid, sys_setresgid16), // 170
1368 LINXY(__NR_getresgid, sys_getresgid16), // 171
1369 LINXY(__NR_prctl, sys_prctl), // 172
1370 PLAX_(__NR_rt_sigreturn, sys_rt_sigreturn), // 173 x86/Linux only?
1371 LINXY(__NR_rt_sigaction, sys_rt_sigaction), // 174
1373 LINXY(__NR_rt_sigprocmask, sys_rt_sigprocmask), // 175
1374 LINXY(__NR_rt_sigpending, sys_rt_sigpending), // 176
1375 LINXY(__NR_rt_sigtimedwait, sys_rt_sigtimedwait),// 177
1376 LINXY(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo),// 178
1377 LINX_(__NR_rt_sigsuspend, sys_rt_sigsuspend), // 179
1379 GENXY(__NR_pread64, sys_pread64), // 180
1380 GENX_(__NR_pwrite64, sys_pwrite64), // 181
1381 LINX_(__NR_chown, sys_chown16), // 182
1382 GENXY(__NR_getcwd, sys_getcwd), // 183
1383 LINXY(__NR_capget, sys_capget), // 184
1385 LINX_(__NR_capset, sys_capset), // 185
1386 GENXY(__NR_sigaltstack, sys_sigaltstack), // 186
1387 LINXY(__NR_sendfile, sys_sendfile), // 187
1388 GENXY(__NR_getpmsg, sys_getpmsg), // 188
1389 GENX_(__NR_putpmsg, sys_putpmsg), // 189
1391 // Nb: we treat vfork as fork
1392 GENX_(__NR_vfork, sys_fork), // 190
1393 GENXY(__NR_ugetrlimit, sys_getrlimit), // 191
1394 PLAX_(__NR_mmap2, sys_mmap2), // 192
1395 GENX_(__NR_truncate64, sys_truncate64), // 193
1396 GENX_(__NR_ftruncate64, sys_ftruncate64), // 194
1398 PLAXY(__NR_stat64, sys_stat64), // 195
1399 PLAXY(__NR_lstat64, sys_lstat64), // 196
1400 PLAXY(__NR_fstat64, sys_fstat64), // 197
1401 GENX_(__NR_lchown32, sys_lchown), // 198
1402 GENX_(__NR_getuid32, sys_getuid), // 199
1404 GENX_(__NR_getgid32, sys_getgid), // 200
1405 GENX_(__NR_geteuid32, sys_geteuid), // 201
1406 GENX_(__NR_getegid32, sys_getegid), // 202
1407 GENX_(__NR_setreuid32, sys_setreuid), // 203
1408 GENX_(__NR_setregid32, sys_setregid), // 204
1410 GENXY(__NR_getgroups32, sys_getgroups), // 205
1411 GENX_(__NR_setgroups32, sys_setgroups), // 206
1412 GENX_(__NR_fchown32, sys_fchown), // 207
1413 LINX_(__NR_setresuid32, sys_setresuid), // 208
1414 LINXY(__NR_getresuid32, sys_getresuid), // 209
1416 LINX_(__NR_setresgid32, sys_setresgid), // 210
1417 LINXY(__NR_getresgid32, sys_getresgid), // 211
1418 GENX_(__NR_chown32, sys_chown), // 212
1419 GENX_(__NR_setuid32, sys_setuid), // 213
1420 GENX_(__NR_setgid32, sys_setgid), // 214
1422 LINX_(__NR_setfsuid32, sys_setfsuid), // 215
1423 LINX_(__NR_setfsgid32, sys_setfsgid), // 216
1424 LINX_(__NR_pivot_root, sys_pivot_root), // 217
1425 GENXY(__NR_mincore, sys_mincore), // 218
1426 GENX_(__NR_madvise, sys_madvise), // 219
1428 GENXY(__NR_getdents64, sys_getdents64), // 220
1429 LINXY(__NR_fcntl64, sys_fcntl64), // 221
1430 GENX_(222, sys_ni_syscall), // 222
1431 PLAXY(223, sys_syscall223), // 223 // sys_bproc?
1432 LINX_(__NR_gettid, sys_gettid), // 224
1434 LINX_(__NR_readahead, sys_readahead), // 225 */Linux
1435 LINX_(__NR_setxattr, sys_setxattr), // 226
1436 LINX_(__NR_lsetxattr, sys_lsetxattr), // 227
1437 LINX_(__NR_fsetxattr, sys_fsetxattr), // 228
1438 LINXY(__NR_getxattr, sys_getxattr), // 229
1440 LINXY(__NR_lgetxattr, sys_lgetxattr), // 230
1441 LINXY(__NR_fgetxattr, sys_fgetxattr), // 231
1442 LINXY(__NR_listxattr, sys_listxattr), // 232
1443 LINXY(__NR_llistxattr, sys_llistxattr), // 233
1444 LINXY(__NR_flistxattr, sys_flistxattr), // 234
1446 LINX_(__NR_removexattr, sys_removexattr), // 235
1447 LINX_(__NR_lremovexattr, sys_lremovexattr), // 236
1448 LINX_(__NR_fremovexattr, sys_fremovexattr), // 237
1449 LINXY(__NR_tkill, sys_tkill), // 238 */Linux
1450 LINXY(__NR_sendfile64, sys_sendfile64), // 239
1452 LINXY(__NR_futex, sys_futex), // 240
1453 LINX_(__NR_sched_setaffinity, sys_sched_setaffinity), // 241
1454 LINXY(__NR_sched_getaffinity, sys_sched_getaffinity), // 242
1455 PLAX_(__NR_set_thread_area, sys_set_thread_area), // 243
1456 PLAX_(__NR_get_thread_area, sys_get_thread_area), // 244
1458 LINXY(__NR_io_setup, sys_io_setup), // 245
1459 LINX_(__NR_io_destroy, sys_io_destroy), // 246
1460 LINXY(__NR_io_getevents, sys_io_getevents), // 247
1461 LINX_(__NR_io_submit, sys_io_submit), // 248
1462 LINXY(__NR_io_cancel, sys_io_cancel), // 249
1464 LINX_(__NR_fadvise64, sys_fadvise64), // 250 */(Linux?)
1465 GENX_(251, sys_ni_syscall), // 251
1466 LINX_(__NR_exit_group, sys_exit_group), // 252
1467 LINXY(__NR_lookup_dcookie, sys_lookup_dcookie), // 253
1468 LINXY(__NR_epoll_create, sys_epoll_create), // 254
1470 LINX_(__NR_epoll_ctl, sys_epoll_ctl), // 255
1471 LINXY(__NR_epoll_wait, sys_epoll_wait), // 256
1472 //zz // (__NR_remap_file_pages, sys_remap_file_pages), // 257 */Linux
1473 LINX_(__NR_set_tid_address, sys_set_tid_address), // 258
1474 LINXY(__NR_timer_create, sys_timer_create), // 259
1476 LINXY(__NR_timer_settime, sys_timer_settime), // (timer_create+1)
1477 LINXY(__NR_timer_gettime, sys_timer_gettime), // (timer_create+2)
1478 LINX_(__NR_timer_getoverrun, sys_timer_getoverrun),//(timer_create+3)
1479 LINX_(__NR_timer_delete, sys_timer_delete), // (timer_create+4)
1480 LINX_(__NR_clock_settime, sys_clock_settime), // (timer_create+5)
1482 LINXY(__NR_clock_gettime, sys_clock_gettime), // (timer_create+6)
1483 LINXY(__NR_clock_getres, sys_clock_getres), // (timer_create+7)
1484 LINXY(__NR_clock_nanosleep, sys_clock_nanosleep),// (timer_create+8) */*
1485 GENXY(__NR_statfs64, sys_statfs64), // 268
1486 GENXY(__NR_fstatfs64, sys_fstatfs64), // 269
1488 LINX_(__NR_tgkill, sys_tgkill), // 270 */Linux
1489 GENX_(__NR_utimes, sys_utimes), // 271
1490 LINX_(__NR_fadvise64_64, sys_fadvise64_64), // 272 */(Linux?)
1491 GENX_(__NR_vserver, sys_ni_syscall), // 273
1492 LINX_(__NR_mbind, sys_mbind), // 274 ?/?
1494 LINXY(__NR_get_mempolicy, sys_get_mempolicy), // 275 ?/?
1495 LINX_(__NR_set_mempolicy, sys_set_mempolicy), // 276 ?/?
1496 LINXY(__NR_mq_open, sys_mq_open), // 277
1497 LINX_(__NR_mq_unlink, sys_mq_unlink), // (mq_open+1)
1498 LINX_(__NR_mq_timedsend, sys_mq_timedsend), // (mq_open+2)
1500 LINXY(__NR_mq_timedreceive, sys_mq_timedreceive),// (mq_open+3)
1501 LINX_(__NR_mq_notify, sys_mq_notify), // (mq_open+4)
1502 LINXY(__NR_mq_getsetattr, sys_mq_getsetattr), // (mq_open+5)
1503 GENX_(__NR_sys_kexec_load, sys_ni_syscall), // 283
1504 LINXY(__NR_waitid, sys_waitid), // 284
1506 GENX_(285, sys_ni_syscall), // 285
1507 LINX_(__NR_add_key, sys_add_key), // 286
1508 LINX_(__NR_request_key, sys_request_key), // 287
1509 LINXY(__NR_keyctl, sys_keyctl), // 288
1510 LINX_(__NR_ioprio_set, sys_ioprio_set), // 289
1512 LINX_(__NR_ioprio_get, sys_ioprio_get), // 290
1513 LINX_(__NR_inotify_init, sys_inotify_init), // 291
1514 LINX_(__NR_inotify_add_watch, sys_inotify_add_watch), // 292
1515 LINX_(__NR_inotify_rm_watch, sys_inotify_rm_watch), // 293
1516 // LINX_(__NR_migrate_pages, sys_migrate_pages), // 294
1518 LINXY(__NR_openat, sys_openat), // 295
1519 LINX_(__NR_mkdirat, sys_mkdirat), // 296
1520 LINX_(__NR_mknodat, sys_mknodat), // 297
1521 LINX_(__NR_fchownat, sys_fchownat), // 298
1522 LINX_(__NR_futimesat, sys_futimesat), // 299
1524 PLAXY(__NR_fstatat64, sys_fstatat64), // 300
1525 LINX_(__NR_unlinkat, sys_unlinkat), // 301
1526 LINX_(__NR_renameat, sys_renameat), // 302
1527 LINX_(__NR_linkat, sys_linkat), // 303
1528 LINX_(__NR_symlinkat, sys_symlinkat), // 304
1530 LINX_(__NR_readlinkat, sys_readlinkat), // 305
1531 LINX_(__NR_fchmodat, sys_fchmodat), // 306
1532 LINX_(__NR_faccessat, sys_faccessat), // 307
1533 LINXY(__NR_pselect6, sys_pselect6), // 308
1534 LINXY(__NR_ppoll, sys_ppoll), // 309
1536 LINX_(__NR_unshare, sys_unshare), // 310
1537 LINX_(__NR_set_robust_list, sys_set_robust_list), // 311
1538 LINXY(__NR_get_robust_list, sys_get_robust_list), // 312
1539 LINX_(__NR_splice, sys_splice), // 313
1540 LINX_(__NR_sync_file_range, sys_sync_file_range), // 314
1542 LINX_(__NR_tee, sys_tee), // 315
1543 LINXY(__NR_vmsplice, sys_vmsplice), // 316
1544 LINXY(__NR_move_pages, sys_move_pages), // 317
1545 LINXY(__NR_getcpu, sys_getcpu), // 318
1546 LINXY(__NR_epoll_pwait, sys_epoll_pwait), // 319
1548 LINX_(__NR_utimensat, sys_utimensat), // 320
1549 LINXY(__NR_signalfd, sys_signalfd), // 321
1550 LINXY(__NR_timerfd_create, sys_timerfd_create), // 322
1551 LINXY(__NR_eventfd, sys_eventfd), // 323
1552 LINX_(__NR_fallocate, sys_fallocate), // 324
1554 LINXY(__NR_timerfd_settime, sys_timerfd_settime), // 325
1555 LINXY(__NR_timerfd_gettime, sys_timerfd_gettime), // 326
1556 LINXY(__NR_signalfd4, sys_signalfd4), // 327
1557 LINXY(__NR_eventfd2, sys_eventfd2), // 328
1558 LINXY(__NR_epoll_create1, sys_epoll_create1), // 329
1560 LINXY(__NR_dup3, sys_dup3), // 330
1561 LINXY(__NR_pipe2, sys_pipe2), // 331
1562 LINXY(__NR_inotify_init1, sys_inotify_init1), // 332
1563 LINXY(__NR_preadv, sys_preadv), // 333
1564 LINX_(__NR_pwritev, sys_pwritev), // 334
1566 LINXY(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo),// 335
1567 LINXY(__NR_perf_event_open, sys_perf_event_open), // 336
1568 LINXY(__NR_recvmmsg, sys_recvmmsg), // 337
1569 LINXY(__NR_fanotify_init, sys_fanotify_init), // 338
1570 LINX_(__NR_fanotify_mark, sys_fanotify_mark), // 339
1572 LINXY(__NR_prlimit64, sys_prlimit64), // 340
1573 LINXY(__NR_name_to_handle_at, sys_name_to_handle_at),// 341
1574 LINXY(__NR_open_by_handle_at, sys_open_by_handle_at),// 342
1575 LINXY(__NR_clock_adjtime, sys_clock_adjtime), // 343
1576 LINX_(__NR_syncfs, sys_syncfs), // 344
1578 LINXY(__NR_sendmmsg, sys_sendmmsg), // 345
1579 // LINX_(__NR_setns, sys_ni_syscall), // 346
1580 LINXY(__NR_process_vm_readv, sys_process_vm_readv), // 347
1581 LINX_(__NR_process_vm_writev, sys_process_vm_writev),// 348
1582 LINX_(__NR_kcmp, sys_kcmp), // 349
1584 // LIN__(__NR_finit_module, sys_ni_syscall), // 350
1585 // LIN__(__NR_sched_setattr, sys_ni_syscall), // 351
1586 // LIN__(__NR_sched_getattr, sys_ni_syscall), // 352
1587 LINX_(__NR_renameat2, sys_renameat2), // 353
1588 // LIN__(__NR_seccomp, sys_ni_syscall), // 354
1590 LINXY(__NR_getrandom, sys_getrandom), // 355
1591 LINXY(__NR_memfd_create, sys_memfd_create), // 356
1592 // LIN__(__NR_bpf, sys_ni_syscall), // 357
1593 LINXY(__NR_socket, sys_socket), // 359
1594 LINXY(__NR_socketpair, sys_socketpair), // 360
1595 LINX_(__NR_bind, sys_bind), // 361
1596 LINX_(__NR_connect, sys_connect), // 362
1597 LINX_(__NR_listen, sys_listen), // 363
1598 LINXY(__NR_accept4, sys_accept4), // 364
1599 LINXY(__NR_getsockopt, sys_getsockopt), // 365
1600 LINX_(__NR_setsockopt, sys_setsockopt), // 366
1601 LINXY(__NR_getsockname, sys_getsockname), // 367
1602 LINXY(__NR_getpeername, sys_getpeername), // 368
1603 LINX_(__NR_sendto, sys_sendto), // 369
1604 LINX_(__NR_sendmsg, sys_sendmsg), // 370
1605 LINXY(__NR_recvfrom, sys_recvfrom), // 371
1606 LINXY(__NR_recvmsg, sys_recvmsg), // 372
1607 LINX_(__NR_shutdown, sys_shutdown) // 373
1610 SyscallTableEntry* ML_(get_linux_syscall_entry) ( UInt sysno )
1612 const UInt syscall_table_size
1613 = sizeof(syscall_table) / sizeof(syscall_table[0]);
1615 /* Is it in the contiguous initial section of the table? */
1616 if (sysno < syscall_table_size) {
1617 SyscallTableEntry* sys = &syscall_table[sysno];
1618 if (sys->before == NULL)
1619 return NULL; /* no entry */
1620 else
1621 return sys;
1624 /* Can't find a wrapper */
1625 return NULL;
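/* Illustrative only: a lookup such as

      SyscallTableEntry* sys = ML_(get_linux_syscall_entry)(__NR_getpid);

   returns the GENX_ entry for sys_getpid (a PRE wrapper, no POST), while an
   unwrapped or out-of-range sysno yields NULL and is treated as an
   unsupported syscall by the driver. */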
1628 #endif // defined(VGP_x86_linux)
1630 /*--------------------------------------------------------------------*/
1631 /*--- end ---*/
1632 /*--------------------------------------------------------------------*/