//===-- sanitizer_syscall_linux_loongarch64.inc -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for
// Linux/loongarch64.
//
//===----------------------------------------------------------------------===//
// About local register variables:
// https://gcc.gnu.org/onlinedocs/gcc/Local-Register-Variables.html#Local-Register-Variables
//
// Kernel ABI:
// https://lore.kernel.org/loongarch/1f353678-3398-e30b-1c87-6edb278f74db@xen0n.name/T/#m1613bc86c2d7bf5f6da92bd62984302bfd699a2f
// syscall number is placed in a7
// parameters, if present, are placed in a0-a6
// upon syscall return, the return value is placed in a0
// t0-t8 should be considered clobbered
// all other registers are preserved
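//
// For example, openat(AT_FDCWD, path, flags, mode) is issued with a7 =
// __NR_openat, a0 = AT_FDCWD, a1 = path, a2 = flags, a3 = mode, followed by
// "syscall 0"; the result (or a negated errno value) comes back in a0.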
#define SYSCALL(name) __NR_##name
#define INTERNAL_SYSCALL_CLOBBERS \
  "memory", "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8"
static uptr __internal_syscall(u64 nr) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0");
  __asm__ volatile("syscall 0\n\t"
                   : "=r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall0(n) (__internal_syscall)(n)
static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall1(n, a1) (__internal_syscall)(n, (u64)(a1))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  register u64 a5 asm("$a5") = arg6;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))
static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, u64 arg4,
                               long arg5, long arg6, long arg7) {
  register u64 a7 asm("$a7") = nr;
  register u64 a0 asm("$a0") = arg1;
  register u64 a1 asm("$a1") = arg2;
  register u64 a2 asm("$a2") = arg3;
  register u64 a3 asm("$a3") = arg4;
  register u64 a4 asm("$a4") = arg5;
  register u64 a5 asm("$a5") = arg6;
  register u64 a6 asm("$a6") = arg7;
  __asm__ volatile("syscall 0\n\t"
                   : "+r"(a0)
                   : "r"(a7), "r"(a1), "r"(a2), "r"(a3), "r"(a4), "r"(a5),
                     "r"(a6)
                   : INTERNAL_SYSCALL_CLOBBERS);
  return a0;
}
#define __internal_syscall7(n, a1, a2, a3, a4, a5, a6, a7) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6), (long)(a7))
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
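// internal_syscall(nr, ...) counts the arguments that follow the syscall
// number and dispatches to the matching __internal_syscallN macro. For
// example, internal_syscall(SYSCALL(close), fd) expands to
// __internal_syscall1(SYSCALL(close), fd), which in turn becomes
// (__internal_syscall)(__NR_close, (u64)(fd)).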
// Helper function used to avoid clobbering of errno.
// Linux reports failure by returning -errno, with errno in [1, 4095].
bool internal_iserror(uptr retval, int *internal_errno) {
  if (retval >= (uptr)-4095) {
    if (internal_errno)
      *internal_errno = -retval;
    return true;
  }
  return false;
}
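// A typical caller (a sketch; real callers live elsewhere in sanitizer_common)
// would look like:
//   uptr res = internal_syscall(SYSCALL(read), fd, (uptr)buf, count);
//   int err;
//   if (internal_iserror(res, &err)) {
//     // res is not a valid result; err holds the errno-style error code.
//   }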