mm: fix exec activate_mm vs TLB shootdown and lazy tlb switching race
[linux/fpc-iii.git] / arch/x86/entry/syscall_64.c
blob c176d2fab1da98274a098f6ffb2a8e349208572a
// SPDX-License-Identifier: GPL-2.0
/* System call table for x86-64. */

#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
#include <asm/asm-offsets.h>
#include <asm/syscall.h>
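
/*
 * <asm/syscalls_64.h> is included twice below with two different
 * definitions of __SYSCALL_64: the first pass expands every entry into
 * an extern prototype, the second into a designated initializer that
 * fills sys_call_table.
 */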
#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
#undef __SYSCALL_64

#define __SYSCALL_64(nr, sym, qual) [nr] = sym,

extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);

asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
	/*
	 * Smells like a compiler bug -- it doesn't work
	 * when the & below is removed.
	 */
	[0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};
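
For reference, here is a minimal, self-contained sketch of the same two-pass
("X-macro") expansion technique outside the kernel. All names in it
(DEMO_CALLS, DEMO_PROTO, DEMO_ENTRY, demo_call_table, the demo_* handlers)
are hypothetical and exist only for illustration; the entry list lives in a
helper macro here instead of a separately re-included header, but the effect
of expanding the same list once for prototypes and once for table
initializers is the same.

/* x_macro_demo.c -- hypothetical, illustrative only; builds with gcc. */
#include <stdio.h>

/* The entry list plays the role of <asm/syscalls_64.h>. */
#define DEMO_CALLS(X) \
	X(0, demo_zero) \
	X(1, demo_one)

/* First pass: expand every entry into a prototype. */
#define DEMO_PROTO(nr, sym) long sym(void);
DEMO_CALLS(DEMO_PROTO)
#undef DEMO_PROTO

long demo_zero(void) { return 0; }
long demo_one(void)  { return 1; }
long demo_ni(void)   { return -1; }	/* "not implemented" fallback */

typedef long (*demo_call_ptr_t)(void);

/*
 * Second pass: default every slot to the fallback, then let the list
 * override the slots it knows about. [0 ... 1] is the same GCC
 * range-initializer extension used in the kernel table above.
 */
#define DEMO_ENTRY(nr, sym) [nr] = sym,
static const demo_call_ptr_t demo_call_table[2] = {
	[0 ... 1] = demo_ni,
	DEMO_CALLS(DEMO_ENTRY)
};
#undef DEMO_ENTRY

int main(void)
{
	printf("%ld %ld\n", demo_call_table[0](), demo_call_table[1]());
	return 0;
}

Designated initializers that appear later in the list override the earlier
range default, which is how the real table falls back to sys_ni_syscall for
any slot that <asm/syscalls_64.h> does not fill.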