#ifndef LLVM_TOOLS_LLVM_BOLT_SYS_X86_64
#define LLVM_TOOLS_LLVM_BOLT_SYS_X86_64

// Save all registers while keeping 16B stack alignment
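// A minimal sketch of such a save/restore pair, assuming the names SAVE_ALL
// and RESTORE_ALL and this register order (both are assumptions): pushing the
// 15 general-purpose registers stores 120 bytes, and the extra 8-byte
// adjustment brings the total to 128 bytes, preserving 16B stack alignment.
#define SAVE_ALL \
  "push %%rax\n" \
  "push %%rbx\n" \
  "push %%rcx\n" \
  "push %%rdx\n" \
  "push %%rdi\n" \
  "push %%rsi\n" \
  "push %%rbp\n" \
  "push %%r8\n" \
  "push %%r9\n" \
  "push %%r10\n" \
  "push %%r11\n" \
  "push %%r12\n" \
  "push %%r13\n" \
  "push %%r14\n" \
  "push %%r15\n" \
  "sub $8, %%rsp\n"
// Mirrors SAVE_ALL in reverse order.
#define RESTORE_ALL \
  "add $8, %%rsp\n" \
  "pop %%r15\n" \
  "pop %%r14\n" \
  "pop %%r13\n" \
  "pop %%r12\n" \
  "pop %%r11\n" \
  "pop %%r10\n" \
  "pop %%r9\n" \
  "pop %%r8\n" \
  "pop %%rbp\n" \
  "pop %%rsi\n" \
  "pop %%rdi\n" \
  "pop %%rdx\n" \
  "pop %%rcx\n" \
  "pop %%rbx\n" \
  "pop %%rax\n"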
// Get the difference between runtime address of .text section and
// static address in section header table. Can be extracted from arbitrary
// pc value recorded at runtime to get the corresponding static address, which
// in turn can be used to search for indirect call description. Needed because
// indirect call descriptions are read-only non-relocatable data.
uint64_t getTextBaseAddress() {
  uint64_t DynAddr;
  uint64_t StaticAddr;
  __asm__ volatile("leaq __hot_end(%%rip), %0\n\t"
                   "movabsq $__hot_end, %1\n\t"
                   : "=r"(DynAddr), "=r"(StaticAddr));
  return DynAddr - StaticAddr;
}
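// For example (illustrative only, names are hypothetical): a pc value PC
// recorded at runtime maps back to its static address in the binary via
//   uint64_t StaticAddr = PC - getTextBaseAddress();
// which can then be used to look up the matching indirect call description.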
#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)
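// STRINGIFY turns a macro's expansion into a string literal, so on Linux, for
// example, "movq $" STRINGIFY(READ_SYSCALL) ", %%rax\n" pastes together into
// "movq $0, %%rax\n" inside the inline-asm templates below.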
uint64_t __read(uint64_t fd, const void *buf, uint64_t count) {
  uint64_t ret;
#if defined(__APPLE__)
#define READ_SYSCALL 0x2000003
#else
#define READ_SYSCALL 0
#endif
  __asm__ __volatile__("movq $" STRINGIFY(READ_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd), "S"(buf), "d"(count)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __write(uint64_t fd, const void *buf, uint64_t count) {
  uint64_t ret;
#if defined(__APPLE__)
#define WRITE_SYSCALL 0x2000004
#else
#define WRITE_SYSCALL 1
#endif
  __asm__ __volatile__("movq $" STRINGIFY(WRITE_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd), "S"(buf), "d"(count)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
void *__mmap(uint64_t addr, uint64_t size, uint64_t prot, uint64_t flags,
             uint64_t fd, uint64_t offset) {
#if defined(__APPLE__)
#define MMAP_SYSCALL 0x20000c5
#else
#define MMAP_SYSCALL 9
#endif
  void *ret;
  register uint64_t r8 asm("r8") = fd;
  register uint64_t r9 asm("r9") = offset;
  register uint64_t r10 asm("r10") = flags;
  __asm__ __volatile__("movq $" STRINGIFY(MMAP_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(addr), "S"(size), "d"(prot), "r"(r10), "r"(r8),
                         "r"(r9)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __munmap(void *addr, uint64_t size) {
#if defined(__APPLE__)
#define MUNMAP_SYSCALL 0x2000049
#else
#define MUNMAP_SYSCALL 11
#endif
  uint64_t ret;
  __asm__ __volatile__("movq $" STRINGIFY(MUNMAP_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(addr), "S"(size)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __sigprocmask(int how, const void *set, void *oldset) {
#if defined(__APPLE__)
#define SIGPROCMASK_SYSCALL 0x2000030
#else
#define SIGPROCMASK_SYSCALL 14
#endif
  uint64_t ret;
  // On Linux, rt_sigprocmask takes the size of the signal set (in bytes) as
  // its fourth argument.
  register long r10 asm("r10") = sizeof(uint64_t);
  __asm__ __volatile__("movq $" STRINGIFY(SIGPROCMASK_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(how), "S"(set), "d"(oldset), "r"(r10)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __getpid() {
  uint64_t ret;
#if defined(__APPLE__)
#define GETPID_SYSCALL 20
#else
#define GETPID_SYSCALL 39
#endif
  __asm__ __volatile__("movq $" STRINGIFY(GETPID_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       :
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __exit(uint64_t code) {
#if defined(__APPLE__)
#define EXIT_SYSCALL 0x2000001
#else
#define EXIT_SYSCALL 231
#endif
  uint64_t ret;
  __asm__ __volatile__("movq $" STRINGIFY(EXIT_SYSCALL) ", %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(code)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
#if !defined(__APPLE__)

// We use a stack-allocated buffer for string manipulation in many pieces of
// this code, including the code that prints each line of the fdata file. This
// buffer needs to accommodate large function names, but shouldn't be
// arbitrarily large (dynamically allocated) for simplicity of our memory
// space usage.
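// A fixed buffer size consistent with the description above (the exact value
// here is an assumption; any size comfortably larger than long mangled
// function names works):
constexpr uint32_t BufSize = 10240;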

// Declare some syscall wrappers we use throughout this code to avoid linking
// against system libc.
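// For example, writing a small file without libc (illustrative only; the path
// is hypothetical and 0x241 is Linux O_WRONLY|O_CREAT|O_TRUNC):
//   uint64_t FD = __open("/tmp/example.fdata", /*flags=*/0x241, /*mode=*/0644);
//   __write(FD, "hello\n", 6);
//   __close(FD);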
uint64_t __open(const char *pathname, uint64_t flags, uint64_t mode) {
  uint64_t ret;
  __asm__ __volatile__("movq $2, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(pathname), "S"(flags), "d"(mode)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
long __getdents64(unsigned int fd, dirent64 *dirp, size_t count) {
  long ret;
  __asm__ __volatile__("movq $217, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd), "S"(dirp), "d"(count)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __readlink(const char *pathname, char *buf, size_t bufsize) {
  uint64_t ret;
  __asm__ __volatile__("movq $89, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(pathname), "S"(buf), "d"(bufsize)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __lseek(uint64_t fd, uint64_t pos, uint64_t whence) {
  uint64_t ret;
  __asm__ __volatile__("movq $8, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd), "S"(pos), "d"(whence)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __ftruncate(uint64_t fd, uint64_t length) {
  int ret;
  __asm__ __volatile__("movq $77, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd), "S"(length)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __close(uint64_t fd) {
  int ret;
  __asm__ __volatile__("movq $3, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __madvise(void *addr, size_t length, int advice) {
  int ret;
  __asm__ __volatile__("movq $28, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(addr), "S"(length), "d"(advice)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __uname(struct UtsNameTy *Buf) {
  int ret;
  __asm__ __volatile__("movq $63, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(Buf)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __nanosleep(const timespec *req, timespec *rem) {
  uint64_t ret;
  __asm__ __volatile__("movq $35, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(req), "S"(rem)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}

// Syscall 57 is fork on x86-64 Linux.
uint64_t __fork() {
  uint64_t ret;
  __asm__ __volatile__("movq $57, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       :
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __mprotect(void *addr, size_t len, int prot) {
  int ret;
  __asm__ __volatile__("movq $10, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(addr), "S"(len), "d"(prot)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}

uint64_t __getppid() {
  uint64_t ret;
  __asm__ __volatile__("movq $110, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       :
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __setpgid(uint64_t pid, uint64_t pgid) {
  int ret;
  __asm__ __volatile__("movq $109, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(pid), "S"(pgid)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
uint64_t __getpgid(uint64_t pid) {
  uint64_t ret;
  __asm__ __volatile__("movq $121, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(pid)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __kill(uint64_t pid, int sig) {
  int ret;
  __asm__ __volatile__("movq $62, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(pid), "S"(sig)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
int __fsync(int fd) {
  int ret;
  __asm__ __volatile__("movq $74, %%rax\n"
                       "syscall\n"
                       : "=a"(ret)
                       : "D"(fd)
                       : "cc", "rcx", "r11", "memory");
  return ret;
}
//              %rdi        %rsi        %rdx        %r10        %r8
// sys_prctl    int option  unsigned    unsigned    unsigned    unsigned
//                          long arg2   long arg3   long arg4   long arg5
int __prctl(int Option, unsigned long Arg2, unsigned long Arg3,
            unsigned long Arg4, unsigned long Arg5) {
  int Ret;
  register long rdx asm("rdx") = Arg3;
  register long r8 asm("r8") = Arg5;
  register long r10 asm("r10") = Arg4;
  __asm__ __volatile__("movq $157, %%rax\n"
                       "syscall\n"
                       : "=a"(Ret)
                       : "D"(Option), "S"(Arg2), "d"(rdx), "r"(r10), "r"(r8)
                       : "cc", "rcx", "r11", "memory");
  return Ret;
}
} // anonymous namespace