; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -passes='default<O3>' -S %s | FileCheck %s
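
; Exercises "align" assume operand bundles through the O3 pipeline
; (inlining, EarlyCSE, SROA).
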
target triple = "arm64-apple-macosx"

declare void @llvm.assume(i1 noundef)

define i32 @earlycse_entry(ptr %p) {
; CHECK-LABEL: define i32 @earlycse_entry(
; CHECK-SAME: ptr captures(none) [[P:%.*]]) local_unnamed_addr {
; CHECK-NEXT:    [[L_I:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L_I]], i64 4) ]
; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I:%.*]] = load i32, ptr [[L_I]], align 4
; CHECK-NEXT:    [[R_I_I:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I_I]])
; CHECK-NEXT:    [[L_2_I:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT:    [[GEP_I:%.*]] = getelementptr i8, ptr [[L_2_I]], i64 4
; CHECK-NEXT:    store ptr [[GEP_I]], ptr [[P]], align 8
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[GEP_I]], i64 4) ]
; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I_I2:%.*]] = load i32, ptr [[GEP_I]], align 4
; CHECK-NEXT:    [[R_I_I3:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I_I2]])
; CHECK-NEXT:    [[L_2_I4:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT:    [[GEP_I5:%.*]] = getelementptr i8, ptr [[L_2_I4]], i64 4
; CHECK-NEXT:    store ptr [[GEP_I5]], ptr [[P]], align 8
; CHECK-NEXT:    ret i32 [[R_I_I3]]
;
  %r.1 = call i32 @earlycse_fn1(ptr %p)
  %r.2 = call i32 @earlycse_fn1(ptr %p)
  ret i32 %r.2
}

define i32 @earlycse_fn1(ptr %p) {
; CHECK-LABEL: define i32 @earlycse_fn1(
; CHECK-SAME: ptr captures(none) [[P:%.*]]) local_unnamed_addr {
; CHECK-NEXT:    [[L:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L]], i64 4) ]
; CHECK-NEXT:    [[L_ASSUME_ALIGNED_I:%.*]] = load i32, ptr [[L]], align 4
; CHECK-NEXT:    [[R_I:%.*]] = tail call i32 @swap(i32 [[L_ASSUME_ALIGNED_I]])
; CHECK-NEXT:    [[L_2:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr i8, ptr [[L_2]], i64 4
; CHECK-NEXT:    store ptr [[GEP]], ptr [[P]], align 8
; CHECK-NEXT:    ret i32 [[R_I]]
;
  %l = load ptr, ptr %p, align 8
  %r = call i32 @load_assume_aligned(ptr %l)
  %l.2 = load ptr, ptr %p, align 8
  %gep = getelementptr i8, ptr %l.2, i64 4
  store ptr %gep, ptr %p, align 8
  ret i32 %r
}

define i32 @load_assume_aligned(ptr %p) {
; CHECK-LABEL: define i32 @load_assume_aligned(
; CHECK-SAME: ptr [[P:%.*]]) local_unnamed_addr {
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P]], i64 4) ]
; CHECK-NEXT:    [[DOT0_COPYLOAD:%.*]] = load i32, ptr [[P]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = tail call i32 @swap(i32 [[DOT0_COPYLOAD]])
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 4) ]
  %l.assume_aligned = load i32, ptr %p, align 1
  %r = call i32 @swap(i32 %l.assume_aligned)
  ret i32 %r
}

declare i32 @swap(i32)

define void @sroa_align_entry(ptr %p) {
; CHECK-LABEL: define void @sroa_align_entry(
; CHECK-SAME: ptr [[P:%.*]]) local_unnamed_addr #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P]], i64 8) ]
; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I_I:%.*]] = load i64, ptr [[P]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[DOT0_COPYLOAD_I_I_I]] to ptr
; CHECK-NEXT:    store i32 0, ptr [[TMP2]], align 4
; CHECK-NEXT:    ret void
;
  %a = alloca ptr, align 8
  store ptr %p, ptr %a, align 8
  %r = call ptr @sroa_fn1(ptr %a)
  store i32 0, ptr %r, align 4
  ret void
}

define ptr @sroa_fn1(ptr %p) {
; CHECK-LABEL: define ptr @sroa_fn1(
; CHECK-SAME: ptr readonly captures(none) [[P:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
; CHECK-NEXT:    [[L:%.*]] = load ptr, ptr [[P]], align 8
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[L]], i64 8) ]
; CHECK-NEXT:    [[L_FN3_I_I:%.*]] = load i64, ptr [[L]], align 8
; CHECK-NEXT:    [[I_I:%.*]] = inttoptr i64 [[L_FN3_I_I]] to ptr
; CHECK-NEXT:    ret ptr [[I_I]]
;
  %l = load ptr, ptr %p, align 8
  %r = call ptr @sroa_fn2(ptr %l)
  ret ptr %r
}

define ptr @sroa_fn2(ptr %p) {
; CHECK-LABEL: define ptr @sroa_fn2(
; CHECK-SAME: ptr [[P:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[P]], i64 8) ]
; CHECK-NEXT:    [[DOT0_COPYLOAD_I_I:%.*]] = load i64, ptr [[P]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[DOT0_COPYLOAD_I_I]] to ptr
; CHECK-NEXT:    ret ptr [[TMP3]]
;
  %r = call i64 @sroa_fn3(ptr %p)
  %i = inttoptr i64 %r to ptr
  ret ptr %i
}

define i64 @sroa_fn3(ptr %0) {
; CHECK-LABEL: define i64 @sroa_fn3(
; CHECK-SAME: ptr [[TMP0:%.*]]) local_unnamed_addr #[[ATTR3]] {
; CHECK-NEXT:    call void @llvm.assume(i1 true) [ "align"(ptr [[TMP0]], i64 8) ]
; CHECK-NEXT:    [[DOT0_COPYLOAD_I:%.*]] = load i64, ptr [[TMP0]], align 8
; CHECK-NEXT:    ret i64 [[DOT0_COPYLOAD_I]]
;
  call void @llvm.assume(i1 true) [ "align"(ptr %0, i64 8) ]
  %l.fn3 = load i64, ptr %0, align 1
  ret i64 %l.fn3
}