// RUN: %clang_cc1 -triple arm64-unknown-linux -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LINUX
// RUN: %clang_cc1 -triple aarch64-windows -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-WIN
// RUN: %clang_cc1 -triple arm64_32-apple-ios13 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

#include <stdint.h>
// Verify __builtin___clear_cache-style cache maintenance lowers to a call to
// the runtime's @__clear_cache rather than being expanded inline.
// NOTE(review): the call statement and closing brace were lost in this paste;
// reconstructed from the CHECK line below — confirm against the original test.
void f0(void *a, void *b) {
  __clear_cache(a, b);
// CHECK: call {{.*}} @__clear_cache
}
// Reads the thread pointer; only the Linux run line checks the intrinsic.
// NOTE(review): the enclosing signature and closing brace were lost in this
// paste; __builtin_thread_pointer yields a pointer, so this is reconstructed
// as `void *tp(void)` — confirm the original name/signature.
void *tp(void) {
  return __builtin_thread_pointer();
// CHECK-LINUX: call {{.*}} @llvm.thread.pointer()
}
// 32-bit bit-reverse maps onto the generic llvm.bitreverse intrinsic.
// (Re-joined: the statement was split across lines and the closing brace lost.)
// CHECK: call {{.*}} @llvm.bitreverse.i32(i32 %a)
unsigned rbit(unsigned a) {
  return __builtin_arm_rbit(a);
}
// `unsigned long` is 32-bit on aarch64-windows (LLP64), so the argument is
// zero-extended before the 64-bit bitreverse; on Linux (LP64) it is used as-is.
// (Re-joined: the statement was split across lines and the closing brace lost.)
// CHECK-WIN: [[A64:%[^ ]+]] = zext i32 %a to i64
// CHECK-WIN: call i64 @llvm.bitreverse.i64(i64 [[A64]])
// CHECK-LINUX: call i64 @llvm.bitreverse.i64(i64 %a)
unsigned long rbitl(unsigned long a) {
  return __builtin_arm_rbit64(a);
}
// 64-bit bit-reverse with a fixed-width type behaves the same on all targets.
// (Re-joined: the statement was split across lines and the closing brace lost.)
// CHECK: call {{.*}} @llvm.bitreverse.i64(i64 %a)
uint64_t rbit64(uint64_t a) {
  return __builtin_arm_rbit64(a);
}
// Each ACLE hint builtin lowers to llvm.aarch64.hint with its encoding.
// NOTE(review): the enclosing signature and closing brace were lost in this
// paste; reconstructed as `void hints(void)` — confirm against the original.
void hints(void) {
  __builtin_arm_nop();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 0)
  __builtin_arm_yield();  //CHECK: call {{.*}} @llvm.aarch64.hint(i32 1)
  __builtin_arm_wfe();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 2)
  __builtin_arm_wfi();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 3)
  __builtin_arm_sev();    //CHECK: call {{.*}} @llvm.aarch64.hint(i32 4)
  __builtin_arm_sevl();   //CHECK: call {{.*}} @llvm.aarch64.hint(i32 5)
}
// Memory/instruction barrier builtins pass their immediate straight through.
// NOTE(review): the enclosing signature and closing brace were lost in this
// paste; reconstructed as `void barriers(void)` — confirm against the original.
void barriers(void) {
  __builtin_arm_dmb(1);  //CHECK: call {{.*}} @llvm.aarch64.dmb(i32 1)
  __builtin_arm_dsb(2);  //CHECK: call {{.*}} @llvm.aarch64.dsb(i32 2)
  __builtin_arm_isb(3);  //CHECK: call {{.*}} @llvm.aarch64.isb(i32 3)
}
// __builtin_arm_prefetch operands: (addr, rw, cache target, stream, is_data).
// NOTE(review): the enclosing signature and closing brace were lost in this
// paste; reconstructed as `void prefetch(void)` — confirm against the original.
void prefetch(void) {
  __builtin_arm_prefetch(0, 1, 2, 0, 1); // pstl3keep
// CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 1, i32 2, i32 0, i32 1)

  // Fixed: this line was labelled pldl1keep but carried the pldl1strm operands
  // (stream=1), duplicating the line below; keep-policy is stream=0.
  __builtin_arm_prefetch(0, 0, 0, 0, 1); // pldl1keep
// CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 0, i32 1)

  __builtin_arm_prefetch(0, 0, 0, 1, 1); // pldl1strm
// CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 1, i32 1)

  __builtin_arm_prefetch(0, 0, 0, 0, 0); // plil1keep
// CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 0, i32 0, i32 0)

  __builtin_arm_prefetch(0, 0, 3, 0, 1); // pldslckeep
// CHECK: call {{.*}} @llvm.aarch64.prefetch(ptr null, i32 0, i32 3, i32 0, i32 1)
}
65 __attribute__((target("v8.5a")))
66 int32_t jcvt(double v
) {
68 //CHECK: call i32 @llvm.aarch64.fjcvtzs
69 return __builtin_arm_jcvt(v
);
72 __typeof__(__builtin_arm_rsr("1:2:3:4:5")) rsr(void);
75 // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_volatile_register.i64(metadata ![[M0:[0-9]]])
76 // CHECK-NEXT: trunc i64 [[V0]] to i32
77 return __builtin_arm_rsr("1:2:3:4:5");
80 __typeof__(__builtin_arm_rsr64("1:2:3:4:5")) rsr64(void);
82 uint64_t rsr64(void) {
83 // CHECK: call i64 @llvm.read_volatile_register.i64(metadata ![[M0:[0-9]]])
84 return __builtin_arm_rsr64("1:2:3:4:5");
// NOTE(review): the enclosing signature and closing brace were lost in this
// paste; __builtin_arm_rsrp yields a pointer (CHECK shows inttoptr ... to ptr),
// so this is reconstructed as `void *rsrp(void)` — confirm against the original.
void *rsrp(void) {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = call i64 @llvm.read_volatile_register.i64(metadata ![[M0:[0-9]]])
  // CHECK-NEXT: inttoptr i64 [[V0]] to ptr
  return __builtin_arm_rsrp("1:2:3:4:5");
}
93 __typeof__(__builtin_arm_wsr("1:2:3:4:5", 0)) wsr(unsigned);
95 void wsr(unsigned v
) {
96 // CHECK: [[V0:[%A-Za-z0-9.]+]] = zext i32 %v to i64
97 // CHECK-NEXT: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 [[V0]])
98 __builtin_arm_wsr("1:2:3:4:5", v
);
101 __typeof__(__builtin_arm_wsr64("1:2:3:4:5", 0)) wsr64(uint64_t);
103 void wsr64(uint64_t v
) {
104 // CHECK: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 %v)
105 __builtin_arm_wsr64("1:2:3:4:5", v
);
// NOTE(review): the enclosing signature and closing brace were lost in this
// paste; the CHECK shows `ptrtoint ptr %v`, so the parameter is a pointer and
// this is reconstructed as `void wsrp(void *v)` — confirm against the original.
void wsrp(void *v) {
  // CHECK: [[V0:[%A-Za-z0-9.]+]] = ptrtoint ptr %v to i64
  // CHECK-NEXT: call void @llvm.write_register.i64(metadata ![[M0:[0-9]]], i64 [[V0]])
  __builtin_arm_wsrp("1:2:3:4:5", v);
}
// Count leading sign bits, 32-bit form.
// (Re-joined: the statement was split across lines and the closing brace lost.)
unsigned int cls(uint32_t v) {
  // CHECK: call i32 @llvm.aarch64.cls(i32 %v)
  return __builtin_arm_cls(v);
}
// `unsigned long` is 32-bit on aarch64-windows (LLP64), so Windows zero-extends
// before the 64-bit cls; Linux (LP64) passes the value through.
// (Re-joined: the statement was split across lines and the closing brace lost.)
unsigned int clsl(unsigned long v) {
  // CHECK-WIN: [[V64:%[^ ]+]] = zext i32 %v to i64
  // CHECK-WIN: call i32 @llvm.aarch64.cls64(i64 [[V64]]
  // CHECK-LINUX: call i32 @llvm.aarch64.cls64(i64 %v)
  return __builtin_arm_cls64(v);
}
// Count leading sign bits, fixed 64-bit form — identical on all targets.
// (Re-joined: the statement was split across lines and the closing brace lost.)
unsigned int clsll(uint64_t v) {
  // CHECK: call i32 @llvm.aarch64.cls64(i64 %v)
  return __builtin_arm_cls64(v);
}
131 // CHECK-LABEL: @rndr(
132 // CHECK-NEXT: entry:
133 // CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.aarch64.rndr()
134 // CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
135 // CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
136 // CHECK-NEXT: store i64 [[TMP1]], ptr [[__ADDR:%.*]], align 8
137 // CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
138 // CHECK-NEXT: ret i32 [[TMP3]]
140 __attribute__((target("rand")))
141 int rndr(uint64_t *__addr
) {
142 return __builtin_arm_rndr(__addr
);
145 // CHECK-LABEL: @rndrrs(
146 // CHECK-NEXT: entry:
147 // CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.aarch64.rndrrs()
148 // CHECK-NEXT: [[TMP1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
149 // CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
150 // CHECK-NEXT: store i64 [[TMP1]], ptr [[__ADDR:%.*]], align 8
151 // CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
152 // CHECK-NEXT: ret i32 [[TMP3]]
154 __attribute__((target("rand")))
155 int rndrrs(uint64_t *__addr
) {
156 return __builtin_arm_rndrrs(__addr
);
159 // CHECK: ![[M0]] = !{!"1:2:3:4:5"}