//===-- clear_cache.c - Implement __clear_cache ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "int_lib.h"
#if defined(__linux__)
#include <assert.h>
#endif
#include <stddef.h>

#if __APPLE__
#include <libkern/OSCacheControl.h>
#endif

#if defined(_WIN32)
// Forward declare Win32 APIs since the GCC mode driver does not handle the
// newer SDKs as well as needed.
uint32_t FlushInstructionCache(uintptr_t hProcess, void *lpBaseAddress,
                               uintptr_t dwSize);
uintptr_t GetCurrentProcess(void);
#endif
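// (In the declarations above, HANDLE and SIZE_T are spelled as uintptr_t so
// that this file does not need to pull in windows.h.)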

#if defined(__FreeBSD__) && defined(__arm__)
// clang-format off
#include <sys/types.h>
#include <machine/sysarch.h>
// clang-format on
#endif

#if defined(__NetBSD__) && defined(__arm__)
#include <machine/sysarch.h>
#endif

#if defined(__OpenBSD__) && (defined(__arm__) || defined(__mips__) || defined(__riscv))
// clang-format off
#include <sys/types.h>
#include <machine/sysarch.h>
// clang-format on
#endif

#if defined(__linux__) && defined(__mips__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
#endif

#if defined(__linux__) && defined(__riscv)
// to get platform-specific syscall definitions
#include <linux/unistd.h>
#endif

// The compiler generates calls to __clear_cache() when creating
// trampoline functions on the stack for use with nested functions.
// It is expected to invalidate the instruction cache for the
// specified range.
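//
// A minimal usage sketch (illustrative only; jit_buf, code, and code_size
// are hypothetical names): a JIT that copies machine code into an
// executable buffer must call __clear_cache() on the written range before
// jumping to it:
//
//   memcpy(jit_buf, code, code_size);                  // write instructions
//   __clear_cache(jit_buf, (char *)jit_buf + code_size);
//   ((void (*)(void))jit_buf)();                       // now safe to execute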

void __clear_cache(void *start, void *end) {
#if __i386__ || __x86_64__ || defined(_M_IX86) || defined(_M_X64)
  // x86 keeps the instruction and data caches coherent in hardware,
  // so there is nothing to do.
#elif defined(_WIN32) && (defined(__arm__) || defined(__aarch64__))
  FlushInstructionCache(GetCurrentProcess(), start, end - start);
#elif defined(__arm__) && !defined(__APPLE__)
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)
  struct arm_sync_icache_args arg;

  arg.addr = (uintptr_t)start;
  arg.len = (uintptr_t)end - (uintptr_t)start;

  sysarch(ARM_SYNC_ICACHE, &arg);
#elif defined(__linux__)
// We used to include asm/unistd.h for the __ARM_NR_cacheflush define, but
// it also brought many other unused defines, as well as a dependency on
// kernel headers to be installed.
//
// This value is stable at least since Linux 3.13 and should remain so for
// compatibility reasons, warranting its re-definition here.
#define __ARM_NR_cacheflush 0x0f0002
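// (0x0f0002 is __ARM_NR_BASE (0x0f0000) + 2: the ARM-private syscalls sit in
// a reserved range above the ordinary syscall numbers.)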
  register int start_reg __asm("r0") = (int)(intptr_t)start;
  const register int end_reg __asm("r1") = (int)(intptr_t)end;
  const register int flags __asm("r2") = 0;
  const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
  __asm __volatile("svc 0x0"
                   : "=r"(start_reg)
                   : "r"(syscall_nr), "r"(start_reg), "r"(end_reg), "r"(flags));
  assert(start_reg == 0 && "Cache flush syscall failed.");
#else
  compilerrt_abort();
#endif
#elif defined(__linux__) && defined(__loongarch__)
  __asm__ volatile("ibar 0");
#elif defined(__mips__)
  const uintptr_t start_int = (uintptr_t)start;
  const uintptr_t end_int = (uintptr_t)end;
  uintptr_t synci_step;
  __asm__ volatile("rdhwr %0, $1" : "=r"(synci_step));
  if (synci_step != 0) {
#if __mips_isa_rev >= 6
    for (uintptr_t p = start_int; p < end_int; p += synci_step)
      __asm__ volatile("synci 0(%0)" : : "r"(p));

    // The final "move $at, $0" is the target of the jr.hb, not its delay slot.
    __asm__ volatile(".set noat\n"
                     "sync\n"
                     "addiupc $at, 12\n"
                     "jr.hb $at\n"
                     "move $at, $0\n"
                     ".set at");
#elif defined(__linux__) || defined(__OpenBSD__)
    // On pre-R6 cores synci may not be globalized, and some implementations
    // report a strange synci_step, so use the libc call instead.
    cacheflush(start, end_int - start_int, BCACHE);
#endif
  }
#elif defined(__aarch64__) && !defined(__APPLE__)
  uint64_t xstart = (uint64_t)(uintptr_t)start;
  uint64_t xend = (uint64_t)(uintptr_t)end;

  // Get Cache Type Info.
  static uint64_t ctr_el0 = 0;
  if (ctr_el0 == 0)
    __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
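  // CTR_EL0 layout (per the Arm ARM): IminLine in bits [3:0] and DminLine in
  // bits [19:16], each the log2 of the line size in 4-byte words; IDC is bit
  // 28 and DIC is bit 29. E.g. DminLine == 4 gives 4 << 4 == 64-byte lines.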

  // The DC and IC instructions must use 64-bit registers so we don't use
  // uintptr_t in case this runs in an ILP32 environment.
  uint64_t addr;

  // If CTR_EL0.IDC is set, data cache cleaning to the point of unification
  // is not required for instruction to data coherence.
  if (((ctr_el0 >> 28) & 0x1) == 0x0) {
    const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
    for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
         addr += dcache_line_size)
      __asm __volatile("dc cvau, %0" ::"r"(addr));
  }
  __asm __volatile("dsb ish");

  // If CTR_EL0.DIC is set, instruction cache invalidation to the point of
  // unification is not required for instruction to data coherence.
  if (((ctr_el0 >> 29) & 0x1) == 0x0) {
    const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
    for (addr = xstart & ~(icache_line_size - 1); addr < xend;
         addr += icache_line_size)
      __asm __volatile("ic ivau, %0" ::"r"(addr));
    __asm __volatile("dsb ish");
  }
  __asm __volatile("isb sy");
#elif defined(__powerpc__)
  // Newer CPUs have a bigger line size made of multiple blocks, so the
  // following value is a minimal common denominator for what used to be
  // a single block cache line and is therefore inefficient.
  const size_t line_size = 32;
  const size_t len = (uintptr_t)end - (uintptr_t)start;

  const uintptr_t mask = ~(line_size - 1);
  const uintptr_t start_line = ((uintptr_t)start) & mask;
  const uintptr_t end_line = ((uintptr_t)start + len + line_size - 1) & mask;

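  // Worked example (hypothetical addresses): start == 0x1005 and len == 8
  // give start_line == 0x1000 and end_line == 0x1020, so the loops below
  // cover every whole 32-byte line touched by the range.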
  for (uintptr_t line = start_line; line < end_line; line += line_size)
    __asm__ volatile("dcbf 0, %0" : : "r"(line));
  __asm__ volatile("sync");

  for (uintptr_t line = start_line; line < end_line; line += line_size)
    __asm__ volatile("icbi 0, %0" : : "r"(line));
  __asm__ volatile("isync");
#elif defined(__sparc__)
  const size_t dword_size = 8;
  const size_t len = (uintptr_t)end - (uintptr_t)start;

  const uintptr_t mask = ~(dword_size - 1);
  const uintptr_t start_dword = ((uintptr_t)start) & mask;
  const uintptr_t end_dword = ((uintptr_t)start + len + dword_size - 1) & mask;

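  // The SPARC flush instruction synchronizes the aligned doubleword that
  // contains the given address, hence the 8-byte stride below.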
  for (uintptr_t dword = start_dword; dword < end_dword; dword += dword_size)
    __asm__ volatile("flush %0" : : "r"(dword));
#elif defined(__riscv) && defined(__linux__)
  // See: arch/riscv/include/asm/cacheflush.h, arch/riscv/kernel/sys_riscv.c
  register void *start_reg __asm("a0") = start;
  const register void *end_reg __asm("a1") = end;
  // "0" means that we clear cache for all threads (SYS_RISCV_FLUSH_ICACHE_ALL)
  const register long flags __asm("a2") = 0;
  const register long syscall_nr __asm("a7") = __NR_riscv_flush_icache;
  __asm __volatile("ecall"
                   : "=r"(start_reg)
                   : "r"(start_reg), "r"(end_reg), "r"(flags), "r"(syscall_nr));
  assert(start_reg == 0 && "Cache flush syscall failed.");
#elif defined(__riscv) && defined(__OpenBSD__)
  struct riscv_sync_icache_args arg;

  arg.addr = (uintptr_t)start;
  arg.len = (uintptr_t)end - (uintptr_t)start;

  sysarch(RISCV_SYNC_ICACHE, &arg);
#elif defined(__ve__)
  __asm__ volatile("fencec 2");
#else
#if __APPLE__
  // On Darwin, sys_icache_invalidate() provides this functionality
  sys_icache_invalidate(start, end - start);
#else
  compilerrt_abort();
#endif
#endif
}