1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes
2 ; RUN: opt < %s -function-attrs -S | FileCheck %s
3 ; RUN: opt < %s -passes=function-attrs -S | FileCheck %s
5 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
7 ; Base case, empty function
9 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
10 ; CHECK-LABEL: @test1(
11 ; CHECK-NEXT: ret void
16 ; Show the bottom up walk
17 define void @test2() {
18 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
19 ; CHECK-LABEL: @test2(
20 ; CHECK-NEXT: call void @test1()
21 ; CHECK-NEXT: ret void
27 declare void @unknown() convergent
29 ; Negative case with convergent function
30 define void @test3() convergent {
31 ; CHECK: Function Attrs: convergent
32 ; CHECK-LABEL: @test3(
33 ; CHECK-NEXT: call void @unknown()
34 ; CHECK-NEXT: ret void
40 define i32 @test4(i32 %a, i32 %b) {
41 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
42 ; CHECK-LABEL: @test4(
43 ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
44 ; CHECK-NEXT: ret i32 [[A]]
50 ; negative case - explicit sync
51 define void @test5(i8* %p) {
52 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn
53 ; CHECK-LABEL: @test5(
54 ; CHECK-NEXT: store atomic i8 0, i8* [[P:%.*]] seq_cst, align 1
55 ; CHECK-NEXT: ret void
57 store atomic i8 0, i8* %p seq_cst, align 1
61 ; negative case - explicit sync
62 define i8 @test6(i8* %p) {
63 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn
64 ; CHECK-LABEL: @test6(
65 ; CHECK-NEXT: [[V:%.*]] = load atomic i8, i8* [[P:%.*]] seq_cst, align 1
66 ; CHECK-NEXT: ret i8 [[V]]
68 %v = load atomic i8, i8* %p seq_cst, align 1
72 ; negative case - explicit sync
73 define void @test7(i8* %p) {
74 ; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn
75 ; CHECK-LABEL: @test7(
76 ; CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add i8* [[P:%.*]], i8 0 seq_cst, align 1
77 ; CHECK-NEXT: ret void
79 atomicrmw add i8* %p, i8 0 seq_cst, align 1
83 ; negative case - explicit sync
84 define void @test8(i8* %p) {
85 ; CHECK: Function Attrs: mustprogress nofree norecurse nounwind willreturn
86 ; CHECK-LABEL: @test8(
87 ; CHECK-NEXT: fence seq_cst
88 ; CHECK-NEXT: ret void
94 ; singlethread fences are okay
95 define void @test9(i8* %p) {
96 ; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind willreturn
97 ; CHECK-LABEL: @test9(
98 ; CHECK-NEXT: fence syncscope("singlethread") seq_cst
99 ; CHECK-NEXT: ret void
101 fence syncscope("singlethread") seq_cst
105 ; atomic load with monotonic ordering
106 define i32 @load_monotonic(i32* nocapture readonly %0) norecurse nounwind uwtable {
107 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
108 ; CHECK-LABEL: @load_monotonic(
109 ; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] monotonic, align 4
110 ; CHECK-NEXT: ret i32 [[TMP2]]
112 %2 = load atomic i32, i32* %0 monotonic, align 4
116 ; atomic store with monotonic ordering.
117 define void @store_monotonic(i32* nocapture %0) norecurse nounwind uwtable {
118 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
119 ; CHECK-LABEL: @store_monotonic(
120 ; CHECK-NEXT: store atomic i32 10, i32* [[TMP0:%.*]] monotonic, align 4
121 ; CHECK-NEXT: ret void
123 store atomic i32 10, i32* %0 monotonic, align 4
127 ; negative, should not deduce nosync
128 ; atomic load with acquire ordering.
129 define i32 @load_acquire(i32* nocapture readonly %0) norecurse nounwind uwtable {
130 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
131 ; CHECK-LABEL: @load_acquire(
132 ; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] acquire, align 4
133 ; CHECK-NEXT: ret i32 [[TMP2]]
135 %2 = load atomic i32, i32* %0 acquire, align 4
139 define i32 @load_unordered(i32* nocapture readonly %0) norecurse nounwind uwtable {
140 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nosync nounwind readonly willreturn uwtable
141 ; CHECK-LABEL: @load_unordered(
142 ; CHECK-NEXT: [[TMP2:%.*]] = load atomic i32, i32* [[TMP0:%.*]] unordered, align 4
143 ; CHECK-NEXT: ret i32 [[TMP2]]
145 %2 = load atomic i32, i32* %0 unordered, align 4
149 ; atomic store with unordered ordering.
150 define void @store_unordered(i32* nocapture %0) norecurse nounwind uwtable {
151 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nosync nounwind willreturn writeonly uwtable
152 ; CHECK-LABEL: @store_unordered(
153 ; CHECK-NEXT: store atomic i32 10, i32* [[TMP0:%.*]] unordered, align 4
154 ; CHECK-NEXT: ret void
156 store atomic i32 10, i32* %0 unordered, align 4
161 ; negative, should not deduce nosync
162 ; atomic store with release ordering (function name says load, but the body is a store)
163 define void @load_release(i32* nocapture %0) norecurse nounwind uwtable {
164 ; CHECK: Function Attrs: argmemonly nofree norecurse nounwind uwtable
165 ; CHECK-LABEL: @load_release(
166 ; CHECK-NEXT: store atomic volatile i32 10, i32* [[TMP0:%.*]] release, align 4
167 ; CHECK-NEXT: ret void
169 store atomic volatile i32 10, i32* %0 release, align 4
173 ; negative - volatile atomic store with release ordering
174 define void @load_volatile_release(i32* nocapture %0) norecurse nounwind uwtable {
175 ; CHECK: Function Attrs: argmemonly nofree norecurse nounwind uwtable
176 ; CHECK-LABEL: @load_volatile_release(
177 ; CHECK-NEXT: store atomic volatile i32 10, i32* [[TMP0:%.*]] release, align 4
178 ; CHECK-NEXT: ret void
180 store atomic volatile i32 10, i32* %0 release, align 4
185 define void @volatile_store(i32* %0) norecurse nounwind uwtable {
186 ; CHECK: Function Attrs: argmemonly nofree norecurse nounwind uwtable
187 ; CHECK-LABEL: @volatile_store(
188 ; CHECK-NEXT: store volatile i32 14, i32* [[TMP0:%.*]], align 4
189 ; CHECK-NEXT: ret void
191 store volatile i32 14, i32* %0, align 4
195 ; negative, should not deduce nosync
197 define i32 @volatile_load(i32* %0) norecurse nounwind uwtable {
198 ; CHECK: Function Attrs: argmemonly mustprogress nofree norecurse nounwind willreturn uwtable
199 ; CHECK-LABEL: @volatile_load(
200 ; CHECK-NEXT: [[TMP2:%.*]] = load volatile i32, i32* [[TMP0:%.*]], align 4
201 ; CHECK-NEXT: ret i32 [[TMP2]]
203 %2 = load volatile i32, i32* %0, align 4
207 ; CHECK: Function Attrs: noinline nosync nounwind uwtable
208 ; CHECK-NEXT: declare void @nosync_function()
209 declare void @nosync_function() noinline nounwind uwtable nosync
211 define void @call_nosync_function() nounwind uwtable noinline {
212 ; CHECK: Function Attrs: noinline nosync nounwind uwtable
213 ; CHECK-LABEL: @call_nosync_function(
214 ; CHECK-NEXT: tail call void @nosync_function() #[[ATTR9:[0-9]+]]
215 ; CHECK-NEXT: ret void
217 tail call void @nosync_function() noinline nounwind uwtable
221 ; CHECK: Function Attrs: noinline nounwind uwtable
222 ; CHECK-NEXT: declare void @might_sync()
223 declare void @might_sync() noinline nounwind uwtable
225 define void @call_might_sync() nounwind uwtable noinline {
226 ; CHECK: Function Attrs: noinline nounwind uwtable
227 ; CHECK-LABEL: @call_might_sync(
228 ; CHECK-NEXT: tail call void @might_sync() #[[ATTR9]]
229 ; CHECK-NEXT: ret void
231 tail call void @might_sync() noinline nounwind uwtable
235 declare void @llvm.memcpy(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
236 declare void @llvm.memset(i8* %dest, i8 %val, i32 %len, i1 %isvolatile)
238 ; negative, checking volatile intrinsics.
239 define i32 @memcpy_volatile(i8* %ptr1, i8* %ptr2) {
240 ; CHECK: Function Attrs: argmemonly mustprogress nofree nounwind willreturn
241 ; CHECK-LABEL: @memcpy_volatile(
242 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* [[PTR1:%.*]], i8* [[PTR2:%.*]], i32 8, i1 true)
243 ; CHECK-NEXT: ret i32 4
245 call void @llvm.memcpy(i8* %ptr1, i8* %ptr2, i32 8, i1 1)
249 ; positive, non-volatile intrinsic.
250 define i32 @memset_non_volatile(i8* %ptr1, i8 %val) {
251 ; CHECK: Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn writeonly
252 ; CHECK-LABEL: @memset_non_volatile(
253 ; CHECK-NEXT: call void @llvm.memset.p0i8.i32(i8* [[PTR1:%.*]], i8 [[VAL:%.*]], i32 8, i1 false)
254 ; CHECK-NEXT: ret i32 4
256 call void @llvm.memset(i8* %ptr1, i8 %val, i32 8, i1 0)
260 ; negative, inline assembly.
261 define i32 @inline_asm_test(i32 %x) {
262 ; CHECK-LABEL: @inline_asm_test(
263 ; CHECK-NEXT: [[TMP1:%.*]] = call i32 asm "bswap $0", "=r,r"(i32 [[X:%.*]])
264 ; CHECK-NEXT: ret i32 4
266 call i32 asm "bswap $0", "=r,r"(i32 %x)
270 declare void @readnone_test() convergent readnone
272 ; negative. Convergent
273 define void @convergent_readnone(){
274 ; CHECK: Function Attrs: nofree nosync readnone
275 ; CHECK-LABEL: @convergent_readnone(
276 ; CHECK-NEXT: call void @readnone_test()
277 ; CHECK-NEXT: ret void
279 call void @readnone_test()
283 ; CHECK: Function Attrs: nounwind
284 ; CHECK-NEXT: declare void @llvm.x86.sse2.clflush(i8*)
285 declare void @llvm.x86.sse2.clflush(i8*)
286 @a = common global i32 0, align 4
288 ; negative. Synchronizing intrinsic
289 define void @i_totally_sync() {
290 ; CHECK: Function Attrs: nounwind
291 ; CHECK-LABEL: @i_totally_sync(
292 ; CHECK-NEXT: tail call void @llvm.x86.sse2.clflush(i8* bitcast (i32* @a to i8*))
293 ; CHECK-NEXT: ret void
295 tail call void @llvm.x86.sse2.clflush(i8* bitcast (i32* @a to i8*))
299 declare float @llvm.cos(float %val) readnone
301 define float @cos_test(float %x) {
302 ; CHECK: Function Attrs: mustprogress nofree nosync nounwind readnone willreturn
303 ; CHECK-LABEL: @cos_test(
304 ; CHECK-NEXT: [[C:%.*]] = call float @llvm.cos.f32(float [[X:%.*]])
305 ; CHECK-NEXT: ret float [[C]]
307 %c = call float @llvm.cos(float %x)