1 ; RUN: opt -S %s -atomic-expand | FileCheck %s
3 ;;; NOTE: this test is actually target-independent -- any target which
4 ;;; doesn't support inline atomics can be used. (E.g. X86 i386 would
5 ;;; work, if LLVM is properly taught about what it's missing vs i586.)
7 ;target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
8 ;target triple = "i386-unknown-unknown"
;; 32-bit SPARC is used here as a representative target without inline
;; atomic support (see note above), so AtomicExpand must emit libcalls.
target datalayout = "e-m:e-p:32:32-i64:64-f128:64-n32-S64"
target triple = "sparc-unknown-unknown"
12 ;; First, check the sized calls. Except for cmpxchg, these are fairly
;; Sized, sufficiently-aligned i16 load: expands to the sized libcall
;; __atomic_load_2. The trailing i32 5 argument encodes the seq_cst
;; memory order used in the IR below.
; CHECK-LABEL: @test_load_i16(
; CHECK: %1 = call i16 @__atomic_load_2(ptr %arg, i32 5)
define i16 @test_load_i16(ptr %arg) {
  %ret = load atomic i16, ptr %arg seq_cst, align 4
;; Sized i16 store: expands to __atomic_store_2, with the value passed
;; directly by value (no temporary buffer needed for a sized call).
; CHECK-LABEL: @test_store_i16(
; CHECK: call void @__atomic_store_2(ptr %arg, i16 %val, i32 5)
define void @test_store_i16(ptr %arg, i16 %val) {
  store atomic i16 %val, ptr %arg seq_cst, align 4
;; atomicrmw xchg on i16: expands to the sized __atomic_exchange_2 call.
; CHECK-LABEL: @test_exchange_i16(
; CHECK: %1 = call i16 @__atomic_exchange_2(ptr %arg, i16 %val, i32 5)
define i16 @test_exchange_i16(ptr %arg, i16 %val) {
  %ret = atomicrmw xchg ptr %arg, i16 %val seq_cst
;; cmpxchg is the exception among the sized calls: the expected value is
;; passed indirectly, via a lifetime-bounded alloca that the libcall also
;; uses to return the value actually loaded. The two trailing arguments
;; (i32 5, i32 0) encode the success (seq_cst) and failure (monotonic)
;; orderings from the IR below, and the i1 libcall result supplies the
;; success bit of the { i16, i1 } cmpxchg aggregate.
; CHECK-LABEL: @test_cmpxchg_i16(
; CHECK: %1 = alloca i16, align 2
; CHECK: call void @llvm.lifetime.start.p0(i64 2, ptr %1)
; CHECK: store i16 %old, ptr %1, align 2
; CHECK: %2 = call zeroext i1 @__atomic_compare_exchange_2(ptr %arg, ptr %1, i16 %new, i32 5, i32 0)
; CHECK: %3 = load i16, ptr %1, align 2
; CHECK: call void @llvm.lifetime.end.p0(i64 2, ptr %1)
; CHECK: %4 = insertvalue { i16, i1 } poison, i16 %3, 0
; CHECK: %5 = insertvalue { i16, i1 } %4, i1 %2, 1
; CHECK: %ret = extractvalue { i16, i1 } %5, 0
define i16 @test_cmpxchg_i16(ptr %arg, i16 %old, i16 %new) {
  %ret_succ = cmpxchg ptr %arg, i16 %old, i16 %new seq_cst monotonic
  %ret = extractvalue { i16, i1 } %ret_succ, 0
;; atomicrmw add on a sized type: expands directly to the sized
;; __atomic_fetch_add_2 call (no cmpxchg loop needed at this width).
; CHECK-LABEL: @test_add_i16(
; CHECK: %1 = call i16 @__atomic_fetch_add_2(ptr %arg, i16 %val, i32 5)
define i16 @test_add_i16(ptr %arg, i16 %val) {
  %ret = atomicrmw add ptr %arg, i16 %val seq_cst
65 ;; Now, check the output for the unsized libcalls. i128 is used for
66 ;; these tests because the "16" suffixed functions aren't available on
;; Oversized load: the generic __atomic_load takes an explicit size
;; (i32 16) and returns the result indirectly through a temporary
;; alloca, bracketed by lifetime markers.
; CHECK-LABEL: @test_load_i128(
; CHECK: %1 = alloca i128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK: call void @__atomic_load(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK: %2 = load i128, ptr %1, align 8
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
define i128 @test_load_i128(ptr %arg) {
  %ret = load atomic i128, ptr %arg seq_cst, align 16
;; Oversized store: the value is spilled to a temporary alloca and passed
;; to the generic __atomic_store by pointer, with an explicit size.
; CHECK-LABEL: @test_store_i128(
; CHECK: %1 = alloca i128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK: store i128 %val, ptr %1, align 8
; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
define void @test_store_i128(ptr %arg, i128 %val) {
  store atomic i128 %val, ptr %arg seq_cst, align 16
;; Oversized xchg: generic __atomic_exchange uses two temporaries — %1
;; carries the new value in, %2 receives the old value out — each with
;; its own lifetime range.
; CHECK-LABEL: @test_exchange_i128(
; CHECK: %1 = alloca i128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK: store i128 %val, ptr %1, align 8
; CHECK: %2 = alloca i128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
; CHECK: call void @__atomic_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5)
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK: %3 = load i128, ptr %2, align 8
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
define i128 @test_exchange_i128(ptr %arg, i128 %val) {
  %ret = atomicrmw xchg ptr %arg, i128 %val seq_cst
;; Oversized cmpxchg: both expected (%1) and desired (%2) values go
;; through temporaries; the expected slot is re-read after the call to
;; recover the observed value. Trailing i32 5, i32 0 arguments encode the
;; seq_cst success / monotonic failure orderings from the IR below.
; CHECK-LABEL: @test_cmpxchg_i128(
; CHECK: %1 = alloca i128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK: store i128 %old, ptr %1, align 8
; CHECK: %2 = alloca i128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
; CHECK: store i128 %new, ptr %2, align 8
; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 0)
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
; CHECK: %4 = load i128, ptr %1, align 8
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK: %5 = insertvalue { i128, i1 } poison, i128 %4, 0
; CHECK: %6 = insertvalue { i128, i1 } %5, i1 %3, 1
; CHECK: %ret = extractvalue { i128, i1 } %6, 0
; CHECK: ret i128 %ret
define i128 @test_cmpxchg_i128(ptr %arg, i128 %old, i128 %new) {
  %ret_succ = cmpxchg ptr %arg, i128 %old, i128 %new seq_cst monotonic
  %ret = extractvalue { i128, i1 } %ret_succ, 0
; This one is a verbose expansion, as there is no generic
; __atomic_fetch_add function, so it needs to expand to a cmpxchg
; loop, which then itself expands into a libcall.
;; Note the two temporaries (%1, %2) are hoisted to the entry block and
;; re-used on every loop iteration; lifetime markers bound each use.
;; Both orderings of the compare-exchange are seq_cst (i32 5, i32 5).
; CHECK-LABEL: @test_add_i128(
; CHECK: %1 = alloca i128, align 8
; CHECK: %2 = alloca i128, align 8
; CHECK: %3 = load i128, ptr %arg, align 16
; CHECK: br label %atomicrmw.start
; CHECK:atomicrmw.start:
; CHECK: %loaded = phi i128 [ %3, %0 ], [ %newloaded, %atomicrmw.start ]
; CHECK: %new = add i128 %loaded, %val
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK: store i128 %loaded, ptr %1, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %2)
; CHECK: store i128 %new, ptr %2, align 8
; CHECK: %4 = call zeroext i1 @__atomic_compare_exchange(i32 16, ptr %arg, ptr %1, ptr %2, i32 5, i32 5)
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %2)
; CHECK: %5 = load i128, ptr %1, align 8
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
; CHECK: %6 = insertvalue { i128, i1 } poison, i128 %5, 0
; CHECK: %7 = insertvalue { i128, i1 } %6, i1 %4, 1
; CHECK: %success = extractvalue { i128, i1 } %7, 1
; CHECK: %newloaded = extractvalue { i128, i1 } %7, 0
; CHECK: br i1 %success, label %atomicrmw.end, label %atomicrmw.start
; CHECK:atomicrmw.end:
; CHECK: ret i128 %newloaded
define i128 @test_add_i128(ptr %arg, i128 %val) {
  %ret = atomicrmw add ptr %arg, i128 %val seq_cst
162 ;; Ensure that non-integer types get bitcast correctly on the way in and out of a libcall:
;; double is loaded through the same-width integer libcall
;; (__atomic_load_8) and bitcast back to the FP type afterwards.
; CHECK-LABEL: @test_load_double(
; CHECK: %1 = call i64 @__atomic_load_8(ptr %arg, i32 5)
; CHECK: %2 = bitcast i64 %1 to double
; CHECK: ret double %2
define double @test_load_double(ptr %arg, double %val) {
  %1 = load atomic double, ptr %arg seq_cst, align 16
;; Mirror of the load case: the double is bitcast to i64 before being
;; handed to the sized integer store libcall.
; CHECK-LABEL: @test_store_double(
; CHECK: %1 = bitcast double %val to i64
; CHECK: call void @__atomic_store_8(ptr %arg, i64 %1, i32 5)
define void @test_store_double(ptr %arg, double %val) {
  store atomic double %val, ptr %arg seq_cst, align 16
;; Pointer-typed cmpxchg: the expected pointer goes through an alloca,
;; while the desired pointer is converted with ptrtoint and passed by
;; value to the sized 4-byte libcall (pointers are 32-bit on this
;; target). The failure ordering here is acquire, encoded as i32 2.
; CHECK-LABEL: @test_cmpxchg_ptr(
; CHECK: %1 = alloca ptr, align 4
; CHECK: call void @llvm.lifetime.start.p0(i64 4, ptr %1)
; CHECK: store ptr %old, ptr %1, align 4
; CHECK: %2 = ptrtoint ptr %new to i32
; CHECK: %3 = call zeroext i1 @__atomic_compare_exchange_4(ptr %arg, ptr %1, i32 %2, i32 5, i32 2)
; CHECK: %4 = load ptr, ptr %1, align 4
; CHECK: call void @llvm.lifetime.end.p0(i64 4, ptr %1)
; CHECK: %5 = insertvalue { ptr, i1 } poison, ptr %4, 0
; CHECK: %6 = insertvalue { ptr, i1 } %5, i1 %3, 1
; CHECK: %ret = extractvalue { ptr, i1 } %6, 0
; CHECK: ret ptr %ret
define ptr @test_cmpxchg_ptr(ptr %arg, ptr %old, ptr %new) {
  %ret_succ = cmpxchg ptr %arg, ptr %old, ptr %new seq_cst acquire
  %ret = extractvalue { ptr, i1 } %ret_succ, 0
201 ;; ...and for a non-integer type of large size too.
;; Large non-integer type: fp128 is spilled to a temporary and stored via
;; the generic size-parameterized __atomic_store, with no bitcast needed
;; since the value never has to travel in an integer register.
; CHECK-LABEL: @test_store_fp128
; CHECK: %1 = alloca fp128, align 8
; CHECK: call void @llvm.lifetime.start.p0(i64 16, ptr %1)
; CHECK: store fp128 %val, ptr %1, align 8
; CHECK: call void @__atomic_store(i32 16, ptr %arg, ptr %1, i32 5)
; CHECK: call void @llvm.lifetime.end.p0(i64 16, ptr %1)
define void @test_store_fp128(ptr %arg, fp128 %val) {
  store atomic fp128 %val, ptr %arg seq_cst, align 16
215 ;; Unaligned loads and stores should be expanded to the generic
216 ;; libcall, just like large loads/stores, and not a specialized one.
217 ;; NOTE: atomicrmw and cmpxchg don't yet support an align attribute;
218 ;; when such support is added, they should also be tested here.
;; align 1 is below the natural alignment of i16, so the generic
;; __atomic_load must be used rather than __atomic_load_2.
; CHECK-LABEL: @test_unaligned_load_i16(
; CHECK: __atomic_load(
define i16 @test_unaligned_load_i16(ptr %arg) {
  %ret = load atomic i16, ptr %arg seq_cst, align 1
;; Same as the unaligned-load case, for stores: the under-aligned i16
;; store falls back to the generic __atomic_store libcall.
; CHECK-LABEL: @test_unaligned_store_i16(
; CHECK: __atomic_store(
define void @test_unaligned_store_i16(ptr %arg, i16 %val) {
  store atomic i16 %val, ptr %arg seq_cst, align 1