// [NewPM][CodeGen][llc] Add NPM support (#70922)
// [llvm-project.git] / bolt / runtime / sys_aarch64.h
// blob 77c9cfcc99f9807ba975f747403514549cb076bd
#ifndef LLVM_TOOLS_LLVM_BOLT_SYS_AARCH64
#define LLVM_TOOLS_LLVM_BOLT_SYS_AARCH64
// Save all general-purpose registers while keeping 16B stack alignment.
// x30 (LR) is stored alone via `str` but still with a full 16-byte slot so
// SP stays 16-byte aligned (the upper 8 bytes of that slot are padding).
#define SAVE_ALL \
  "stp x0, x1, [sp, #-16]!\n" \
  "stp x2, x3, [sp, #-16]!\n" \
  "stp x4, x5, [sp, #-16]!\n" \
  "stp x6, x7, [sp, #-16]!\n" \
  "stp x8, x9, [sp, #-16]!\n" \
  "stp x10, x11, [sp, #-16]!\n" \
  "stp x12, x13, [sp, #-16]!\n" \
  "stp x14, x15, [sp, #-16]!\n" \
  "stp x16, x17, [sp, #-16]!\n" \
  "stp x18, x19, [sp, #-16]!\n" \
  "stp x20, x21, [sp, #-16]!\n" \
  "stp x22, x23, [sp, #-16]!\n" \
  "stp x24, x25, [sp, #-16]!\n" \
  "stp x26, x27, [sp, #-16]!\n" \
  "stp x28, x29, [sp, #-16]!\n" \
  "str x30, [sp,#-16]!\n"
// Mirrors SAVE_ALL: pops registers in exact reverse order of the pushes
// above, so it must only be used after SAVE_ALL with a balanced stack.
#define RESTORE_ALL \
  "ldr x30, [sp], #16\n" \
  "ldp x28, x29, [sp], #16\n" \
  "ldp x26, x27, [sp], #16\n" \
  "ldp x24, x25, [sp], #16\n" \
  "ldp x22, x23, [sp], #16\n" \
  "ldp x20, x21, [sp], #16\n" \
  "ldp x18, x19, [sp], #16\n" \
  "ldp x16, x17, [sp], #16\n" \
  "ldp x14, x15, [sp], #16\n" \
  "ldp x12, x13, [sp], #16\n" \
  "ldp x10, x11, [sp], #16\n" \
  "ldp x8, x9, [sp], #16\n" \
  "ldp x6, x7, [sp], #16\n" \
  "ldp x4, x5, [sp], #16\n" \
  "ldp x2, x3, [sp], #16\n" \
  "ldp x0, x1, [sp], #16\n"
// Anonymous namespace covering everything but our library entry point
namespace {
// Get the difference between runtime address of .text section and
// static address in section header table. Can be extracted from arbitrary
// pc value recorded at runtime to get the corresponding static address, which
// in turn can be used to search for indirect call description. Needed because
// indirect call descriptions are read-only non-relocatable data.
uint64_t getTextBaseAddress() {
  uint64_t DynAddr;
  uint64_t StaticAddr;
  // The branch skips over an inline 8-byte literal holding the link-time
  // address of __hot_end; `ldr` reads that literal (static address) while
  // adrp/add materialize the run-time address of the same symbol.
  __asm__ volatile("b .instr%=\n\t"
                   ".StaticAddr%=:\n\t"
                   ".dword __hot_end\n\t"
                   ".instr%=:\n\t"
                   "ldr %0, .StaticAddr%=\n\t"
                   "adrp %1, __hot_end\n\t"
                   "add %1, %1, :lo12:__hot_end\n\t"
                   : "=r"(StaticAddr), "=r"(DynAddr));
  return DynAddr - StaticAddr;
}
// Raw wrapper for read(2) — AArch64 syscall 63. Returns the raw kernel
// return value. NOTE(review): buf is declared const although read writes
// into it — kept to preserve the original interface.
uint64_t __read(uint64_t fd, const void *buf, uint64_t count) {
  uint64_t ret;
  register uint64_t x0 __asm__("x0") = fd;
  register const void *x1 __asm__("x1") = buf;
  register uint64_t x2 __asm__("x2") = count;
  register uint32_t w8 __asm__("w8") = 63;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for write(2) — AArch64 syscall 64. Returns the raw kernel
// return value.
uint64_t __write(uint64_t fd, const void *buf, uint64_t count) {
  uint64_t ret;
  register uint64_t x0 __asm__("x0") = fd;
  register const void *x1 __asm__("x1") = buf;
  register uint64_t x2 __asm__("x2") = count;
  register uint32_t w8 __asm__("w8") = 64;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for mmap(2) — AArch64 syscall 222. Returns the raw kernel
// return value cast to a pointer (callers must check for MAP_FAILED-style
// negative values themselves).
void *__mmap(uint64_t addr, uint64_t size, uint64_t prot, uint64_t flags,
             uint64_t fd, uint64_t offset) {
  void *ret;
  register uint64_t x0 __asm__("x0") = addr;
  register uint64_t x1 __asm__("x1") = size;
  register uint64_t x2 __asm__("x2") = prot;
  register uint64_t x3 __asm__("x3") = flags;
  register uint64_t x4 __asm__("x4") = fd;
  register uint64_t x5 __asm__("x5") = offset;
  register uint32_t w8 __asm__("w8") = 222;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(x3), "r"(x4), "r"(x5), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for munmap(2) — AArch64 syscall 215.
uint64_t __munmap(void *addr, uint64_t size) {
  uint64_t ret;
  register void *x0 __asm__("x0") = addr;
  register uint64_t x1 __asm__("x1") = size;
  register uint32_t w8 __asm__("w8") = 215;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Process exit — AArch64 syscall 94. Does not return on success; the
// return value only matters if the syscall itself fails.
uint64_t __exit(uint64_t code) {
  uint64_t ret;
  register uint64_t x0 __asm__("x0") = code;
  register uint32_t w8 __asm__("w8") = 94;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0)
                       : "r"(w8)
                       : "cc", "memory", "x1");
  return ret;
}
// open(2) emulated via openat — AArch64 syscall 56 — with dirfd fixed to
// AT_FDCWD (-100) so relative paths resolve against the current directory.
uint64_t __open(const char *pathname, uint64_t flags, uint64_t mode) {
  uint64_t ret;
  register int x0 __asm__("x0") = -100;
  register const char *x1 __asm__("x1") = pathname;
  register uint64_t x2 __asm__("x2") = flags;
  register uint64_t x3 __asm__("x3") = mode;
  register uint32_t w8 __asm__("w8") = 56;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(x3), "r"(w8)
                       : "cc", "memory");
  return ret;
}
149 long __getdents64(unsigned int fd, dirent64 *dirp, size_t count) {
150 long ret;
151 register unsigned int x0 __asm__("x0") = fd;
152 register dirent64 *x1 __asm__("x1") = dirp;
153 register size_t x2 __asm__("x2") = count;
154 register uint32_t w8 __asm__("w8") = 61;
155 __asm__ __volatile__("svc #0\n"
156 "mov %0, x0"
157 : "=r"(ret), "+r"(x0), "+r"(x1)
158 : "r"(x2), "r"(w8)
159 : "cc", "memory");
160 return ret;
// readlink(2) emulated via readlinkat — AArch64 syscall 78 — with dirfd
// fixed to AT_FDCWD (-100).
uint64_t __readlink(const char *pathname, char *buf, size_t bufsize) {
  uint64_t ret;
  register int x0 __asm__("x0") = -100;
  register const char *x1 __asm__("x1") = pathname;
  register char *x2 __asm__("x2") = buf;
  register size_t x3 __asm__("x3") = bufsize;
  register uint32_t w8 __asm__("w8") = 78; // readlinkat
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(x3), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for lseek(2) — AArch64 syscall 62.
uint64_t __lseek(uint64_t fd, uint64_t pos, uint64_t whence) {
  uint64_t ret;
  register uint64_t x0 __asm__("x0") = fd;
  register uint64_t x1 __asm__("x1") = pos;
  register uint64_t x2 __asm__("x2") = whence;
  register uint32_t w8 __asm__("w8") = 62;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for ftruncate(2) — AArch64 syscall 46. The 32-bit result is
// moved out with a `w`-register move to match the int return type.
int __ftruncate(uint64_t fd, uint64_t length) {
  int ret;
  register uint64_t x0 __asm__("x0") = fd;
  register uint64_t x1 __asm__("x1") = length;
  register uint32_t w8 __asm__("w8") = 46;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for close(2) — AArch64 syscall 57.
int __close(uint64_t fd) {
  int ret;
  register uint64_t x0 __asm__("x0") = fd;
  register uint32_t w8 __asm__("w8") = 57;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0)
                       : "r"(w8)
                       : "cc", "memory", "x1");
  return ret;
}
// Raw wrapper for madvise(2) — AArch64 syscall 233.
int __madvise(void *addr, size_t length, int advice) {
  int ret;
  register void *x0 __asm__("x0") = addr;
  register size_t x1 __asm__("x1") = length;
  register int x2 __asm__("x2") = advice;
  register uint32_t w8 __asm__("w8") = 233;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for uname(2) — AArch64 syscall 160. Fills the caller's
// UtsNameTy buffer with kernel identification strings.
int __uname(struct UtsNameTy *buf) {
  int ret;
  register UtsNameTy *x0 __asm__("x0") = buf;
  register uint32_t w8 __asm__("w8") = 160;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0)
                       : "r"(w8)
                       : "cc", "memory", "x1");
  return ret;
}
243 uint64_t __nanosleep(const timespec *req, timespec *rem) {
244 uint64_t ret;
245 register const timespec *x0 __asm__("x0") = req;
246 register timespec *x1 __asm__("x1") = rem;
247 register uint32_t w8 __asm__("w8") = 101;
248 __asm__ __volatile__("svc #0\n"
249 "mov %0, x0"
250 : "=r"(ret), "+r"(x0), "+r"(x1)
251 : "r"(w8)
252 : "cc", "memory");
253 return ret;
// fork(2) emulated via clone — AArch64 syscall 220 — because arm64 has no
// native fork syscall. Flags 0x1200011 =
// CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | SIGCHLD.
int64_t __fork() {
  uint64_t ret;
  register uint64_t x0 __asm__("x0") = 0x1200011;
  register uint64_t x1 __asm__("x1") = 0;
  register uint64_t x2 __asm__("x2") = 0;
  register uint64_t x3 __asm__("x3") = 0;
  register uint64_t x4 __asm__("x4") = 0;
  register uint32_t w8 __asm__("w8") = 220;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(x3), "r"(x4), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for mprotect(2) — AArch64 syscall 226.
int __mprotect(void *addr, size_t len, int prot) {
  int ret;
  register void *x0 __asm__("x0") = addr;
  register size_t x1 __asm__("x1") = len;
  register int x2 __asm__("x2") = prot;
  register uint32_t w8 __asm__("w8") = 226;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for getpid(2) — AArch64 syscall 172. x0/x1 are listed as
// clobbers since no argument registers are bound.
uint64_t __getpid() {
  uint64_t ret;
  register uint32_t w8 __asm__("w8") = 172;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret)
                       : "r"(w8)
                       : "cc", "memory", "x0", "x1");
  return ret;
}
// Raw wrapper for getppid(2) — AArch64 syscall 173.
uint64_t __getppid() {
  uint64_t ret;
  register uint32_t w8 __asm__("w8") = 173;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret)
                       : "r"(w8)
                       : "cc", "memory", "x0", "x1");
  return ret;
}
// Raw wrapper for setpgid(2) — AArch64 syscall 154.
int __setpgid(uint64_t pid, uint64_t pgid) {
  int ret;
  register uint64_t x0 __asm__("x0") = pid;
  register uint64_t x1 __asm__("x1") = pgid;
  register uint32_t w8 __asm__("w8") = 154;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for getpgid(2) — AArch64 syscall 155.
uint64_t __getpgid(uint64_t pid) {
  uint64_t ret;
  register uint64_t x0 __asm__("x0") = pid;
  register uint32_t w8 __asm__("w8") = 155;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0)
                       : "r"(w8)
                       : "cc", "memory", "x1");
  return ret;
}
// Raw wrapper for kill(2) — AArch64 syscall 129.
int __kill(uint64_t pid, int sig) {
  int ret;
  register uint64_t x0 __asm__("x0") = pid;
  register int x1 __asm__("x1") = sig;
  register uint32_t w8 __asm__("w8") = 129;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for fsync(2) — AArch64 syscall 82.
int __fsync(int fd) {
  int ret;
  register int x0 __asm__("x0") = fd;
  register uint32_t w8 __asm__("w8") = 82;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0)
                       : "r"(w8)
                       : "cc", "memory", "x1");
  return ret;
}
// sigprocmask via rt_sigprocmask — AArch64 syscall 135. x3 carries the
// sigsetsize argument, fixed at 8 bytes (the kernel sigset size).
// Changed bare `asm` to `__asm__` for consistency with every other wrapper.
uint64_t __sigprocmask(int how, const void *set, void *oldset) {
  uint64_t ret;
  register int x0 __asm__("x0") = how;
  register const void *x1 __asm__("x1") = set;
  register void *x2 __asm__("x2") = oldset;
  register long x3 __asm__("x3") = 8;
  register uint32_t w8 __asm__("w8") = 135;
  __asm__ __volatile__("svc #0\n"
                       "mov %0, x0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(x3), "r"(w8)
                       : "cc", "memory");
  return ret;
}
// Raw wrapper for prctl(2) — AArch64 syscall 167.
int __prctl(int option, unsigned long arg2, unsigned long arg3,
            unsigned long arg4, unsigned long arg5) {
  int ret;
  register int x0 __asm__("x0") = option;
  register unsigned long x1 __asm__("x1") = arg2;
  register unsigned long x2 __asm__("x2") = arg3;
  register unsigned long x3 __asm__("x3") = arg4;
  register unsigned long x4 __asm__("x4") = arg5;
  register uint32_t w8 __asm__("w8") = 167;
  __asm__ __volatile__("svc #0\n"
                       "mov %w0, w0"
                       : "=r"(ret), "+r"(x0), "+r"(x1)
                       : "r"(x2), "r"(x3), "r"(x4), "r"(w8)
                       : "cc", "memory");
  return ret;
}
} // anonymous namespace

#endif