; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes='default<O3>' -enable-matrix -S %s | FileCheck %s

target triple = "arm64-apple-ios"
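
; Both test functions update element (k, j) of the 15 x 15 matrix B (stored
; column-major as <225 x double>) with B[k][j] - A[k][j] * B[i][j]; the second
; one does so inside a j/k loop nest. As a rough illustration only (an
; assumption, not taken from the original test), C code along these lines,
; built with Clang's matrix extension (-fenable-matrix), would lower to this
; kind of IR:
;
;   typedef double m15x15_t __attribute__((matrix_type(15, 15)));
;
;   void matrix_extract_insert_scalar(unsigned i, unsigned k, unsigned j,
;                                     m15x15_t *A, m15x15_t *B) {
;     (*B)[k][j] = (*B)[k][j] - (*A)[k][j] * (*B)[i][j];
;   }
;
;   void matrix_extract_insert_loop(unsigned i, m15x15_t *A, m15x15_t *B) {
;     for (unsigned j = 0; j < 4; ++j)
;       for (unsigned k = 0; k < i; ++k)
;         (*B)[k][j] = (*B)[k][j] - (*A)[k][j] * (*B)[i][j];
;   }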
define void @matrix_extract_insert_scalar(i32 %i, i32 %k, i32 %j, [225 x double]* nonnull align 8 dereferenceable(1800) %A, [225 x double]* nonnull align 8 dereferenceable(1800) %B) #0 {
; CHECK-LABEL: @matrix_extract_insert_scalar(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CONV:%.*]] = zext i32 [[K:%.*]] to i64
; CHECK-NEXT:    [[CONV1:%.*]] = zext i32 [[J:%.*]] to i64
; CHECK-NEXT:    [[TMP0:%.*]] = mul nuw nsw i64 [[CONV1]], 15
; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ult i64 [[TMP1]], 225
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP2]])
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast [225 x double]* [[A:%.*]] to <225 x double>*
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds <225 x double>, <225 x double>* [[TMP3]], i64 0, i64 [[TMP1]]
; CHECK-NEXT:    [[MATRIXEXT:%.*]] = load double, double* [[TMP4]], align 8
; CHECK-NEXT:    [[CONV2:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP5:%.*]] = add nuw nsw i64 [[TMP0]], [[CONV2]]
; CHECK-NEXT:    [[TMP6:%.*]] = icmp ult i64 [[TMP5]], 225
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP6]])
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast [225 x double]* [[B:%.*]] to <225 x double>*
; CHECK-NEXT:    [[TMP8:%.*]] = load <225 x double>, <225 x double>* [[TMP7]], align 8
; CHECK-NEXT:    [[MATRIXEXT4:%.*]] = extractelement <225 x double> [[TMP8]], i64 [[TMP5]]
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[MATRIXEXT]], [[MATRIXEXT4]]
; CHECK-NEXT:    [[MATRIXEXT7:%.*]] = extractelement <225 x double> [[TMP8]], i64 [[TMP1]]
; CHECK-NEXT:    [[SUB:%.*]] = fsub double [[MATRIXEXT7]], [[MUL]]
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds <225 x double>, <225 x double>* [[TMP7]], i64 0, i64 [[TMP1]]
; CHECK-NEXT:    store double [[SUB]], double* [[TMP9]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %i.addr = alloca i32, align 4
  %k.addr = alloca i32, align 4
  %j.addr = alloca i32, align 4
  %A.addr = alloca [225 x double]*, align 8
  %B.addr = alloca [225 x double]*, align 8
  store i32 %i, i32* %i.addr, align 4
  store i32 %k, i32* %k.addr, align 4
  store i32 %j, i32* %j.addr, align 4
  store [225 x double]* %A, [225 x double]** %A.addr, align 8
  store [225 x double]* %B, [225 x double]** %B.addr, align 8
  %0 = load i32, i32* %k.addr, align 4
  %conv = zext i32 %0 to i64
  %1 = load i32, i32* %j.addr, align 4
  %conv1 = zext i32 %1 to i64
  %2 = mul i64 %conv1, 15
  %3 = add i64 %2, %conv
  %4 = icmp ult i64 %3, 225
  call void @llvm.assume(i1 %4)
  %5 = load [225 x double]*, [225 x double]** %A.addr, align 8
  %6 = bitcast [225 x double]* %5 to <225 x double>*
  %7 = load <225 x double>, <225 x double>* %6, align 8
  %matrixext = extractelement <225 x double> %7, i64 %3
  %8 = load i32, i32* %i.addr, align 4
  %conv2 = zext i32 %8 to i64
  %9 = load i32, i32* %j.addr, align 4
  %conv3 = zext i32 %9 to i64
  %10 = mul i64 %conv3, 15
  %11 = add i64 %10, %conv2
  %12 = icmp ult i64 %11, 225
  call void @llvm.assume(i1 %12)
  %13 = load [225 x double]*, [225 x double]** %B.addr, align 8
  %14 = bitcast [225 x double]* %13 to <225 x double>*
  %15 = load <225 x double>, <225 x double>* %14, align 8
  %matrixext4 = extractelement <225 x double> %15, i64 %11
  %mul = fmul double %matrixext, %matrixext4
  %16 = load [225 x double]*, [225 x double]** %B.addr, align 8
  %17 = load i32, i32* %k.addr, align 4
  %conv5 = zext i32 %17 to i64
  %18 = load i32, i32* %j.addr, align 4
  %conv6 = zext i32 %18 to i64
  %19 = mul i64 %conv6, 15
  %20 = add i64 %19, %conv5
  %21 = bitcast [225 x double]* %16 to <225 x double>*
  %22 = icmp ult i64 %20, 225
  call void @llvm.assume(i1 %22)
  %23 = load <225 x double>, <225 x double>* %21, align 8
  %matrixext7 = extractelement <225 x double> %23, i64 %20
  %sub = fsub double %matrixext7, %mul
  %24 = icmp ult i64 %20, 225
  call void @llvm.assume(i1 %24)
  %25 = load <225 x double>, <225 x double>* %21, align 8
  %matins = insertelement <225 x double> %25, double %sub, i64 %20
  store <225 x double> %matins, <225 x double>* %21, align 8
  ret void
}

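; @matrix_extract_insert_loop performs the same B[k][j] update inside a j/k
; loop nest. The CHECK lines below record what the O3 pipeline is expected to
; produce: the read of A is narrowed to a single scalar load through a
; getelementptr, the full <225 x double> load of B is hoisted out of the loop
; nest and carried through phis, and the index computation and @llvm.assume
; for the j-based index are hoisted from the inner loop into the outer loop.
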
define void @matrix_extract_insert_loop(i32 %i, [225 x double]* nonnull align 8 dereferenceable(1800) %A, [225 x double]* nonnull align 8 dereferenceable(1800) %B) {
; CHECK-LABEL: @matrix_extract_insert_loop(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [225 x double]* [[A:%.*]] to <225 x double>*
; CHECK-NEXT:    [[CONV6:%.*]] = zext i32 [[I:%.*]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast [225 x double]* [[B:%.*]] to <225 x double>*
; CHECK-NEXT:    [[CMP212_NOT:%.*]] = icmp eq i32 [[I]], 0
; CHECK-NEXT:    br i1 [[CMP212_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
; CHECK:       for.cond1.preheader.us.preheader:
; CHECK-NEXT:    [[DOTPRE_PRE:%.*]] = load <225 x double>, <225 x double>* [[TMP1]], align 8
; CHECK-NEXT:    br label [[FOR_COND1_PREHEADER_US:%.*]]
; CHECK:       for.cond1.preheader.us:
; CHECK-NEXT:    [[DOTPRE:%.*]] = phi <225 x double> [ [[MATINS_US:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]] ], [ [[DOTPRE_PRE]], [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT:    [[J_014_US:%.*]] = phi i32 [ [[INC13_US:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
; CHECK-NEXT:    [[CONV5_US:%.*]] = zext i32 [[J_014_US]] to i64
; CHECK-NEXT:    [[TMP2:%.*]] = mul nuw nsw i64 [[CONV5_US]], 15
; CHECK-NEXT:    [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], [[CONV6]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ult i64 [[TMP3]], 225
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP4]])
; CHECK-NEXT:    br label [[FOR_BODY4_US:%.*]]
; CHECK:       for.body4.us:
; CHECK-NEXT:    [[TMP5:%.*]] = phi <225 x double> [ [[DOTPRE]], [[FOR_COND1_PREHEADER_US]] ], [ [[MATINS_US]], [[FOR_BODY4_US]] ]
; CHECK-NEXT:    [[K_013_US:%.*]] = phi i32 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[FOR_BODY4_US]] ]
; CHECK-NEXT:    [[CONV_US:%.*]] = zext i32 [[K_013_US]] to i64
; CHECK-NEXT:    [[TMP6:%.*]] = add nuw nsw i64 [[TMP2]], [[CONV_US]]
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ult i64 [[TMP6]], 225
; CHECK-NEXT:    tail call void @llvm.assume(i1 [[TMP7]])
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds <225 x double>, <225 x double>* [[TMP0]], i64 0, i64 [[TMP6]]
; CHECK-NEXT:    [[MATRIXEXT_US:%.*]] = load double, double* [[TMP8]], align 8
; CHECK-NEXT:    [[MATRIXEXT8_US:%.*]] = extractelement <225 x double> [[TMP5]], i64 [[TMP3]]
; CHECK-NEXT:    [[MUL_US:%.*]] = fmul double [[MATRIXEXT_US]], [[MATRIXEXT8_US]]
; CHECK-NEXT:    [[MATRIXEXT11_US:%.*]] = extractelement <225 x double> [[TMP5]], i64 [[TMP6]]
; CHECK-NEXT:    [[SUB_US:%.*]] = fsub double [[MATRIXEXT11_US]], [[MUL_US]]
; CHECK-NEXT:    [[MATINS_US]] = insertelement <225 x double> [[TMP5]], double [[SUB_US]], i64 [[TMP6]]
; CHECK-NEXT:    store <225 x double> [[MATINS_US]], <225 x double>* [[TMP1]], align 8
; CHECK-NEXT:    [[INC_US]] = add nuw i32 [[K_013_US]], 1
; CHECK-NEXT:    [[CMP2_US:%.*]] = icmp ult i32 [[INC_US]], [[I]]
; CHECK-NEXT:    br i1 [[CMP2_US]], label [[FOR_BODY4_US]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]]
; CHECK:       for.cond1.for.cond.cleanup3_crit_edge.us:
; CHECK-NEXT:    [[INC13_US]] = add nuw nsw i32 [[J_014_US]], 1
; CHECK-NEXT:    [[CMP_US:%.*]] = icmp ult i32 [[J_014_US]], 3
; CHECK-NEXT:    br i1 [[CMP_US]], label [[FOR_COND1_PREHEADER_US]], label [[FOR_COND_CLEANUP]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    ret void
;
entry:
  %i.addr = alloca i32, align 4
  %A.addr = alloca [225 x double]*, align 8
  %B.addr = alloca [225 x double]*, align 8
  %j = alloca i32, align 4
  %cleanup.dest.slot = alloca i32, align 4
  %k = alloca i32, align 4
  store i32 %i, i32* %i.addr, align 4
  store [225 x double]* %A, [225 x double]** %A.addr, align 8
  store [225 x double]* %B, [225 x double]** %B.addr, align 8
  %0 = bitcast i32* %j to i8*
  call void @llvm.lifetime.start.p0i8(i64 4, i8* %0) #3
  store i32 0, i32* %j, align 4
  br label %for.cond

for.cond:                                         ; preds = %for.inc12, %entry
  %1 = load i32, i32* %j, align 4
  %cmp = icmp ult i32 %1, 4
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  store i32 2, i32* %cleanup.dest.slot, align 4
  %2 = bitcast i32* %j to i8*
  call void @llvm.lifetime.end.p0i8(i64 4, i8* %2) #3
  br label %for.end14

for.body:                                         ; preds = %for.cond
  %3 = bitcast i32* %k to i8*
  call void @llvm.lifetime.start.p0i8(i64 4, i8* %3) #3
  store i32 0, i32* %k, align 4
  br label %for.cond1

for.cond1:                                        ; preds = %for.inc, %for.body
  %4 = load i32, i32* %k, align 4
  %5 = load i32, i32* %i.addr, align 4
  %cmp2 = icmp ult i32 %4, %5
  br i1 %cmp2, label %for.body4, label %for.cond.cleanup3

for.cond.cleanup3:                                ; preds = %for.cond1
  store i32 5, i32* %cleanup.dest.slot, align 4
  %6 = bitcast i32* %k to i8*
  call void @llvm.lifetime.end.p0i8(i64 4, i8* %6) #3
  br label %for.end

for.body4:                                        ; preds = %for.cond1
  %7 = load i32, i32* %k, align 4
  %conv = zext i32 %7 to i64
  %8 = load i32, i32* %j, align 4
  %conv5 = zext i32 %8 to i64
  %9 = mul i64 %conv5, 15
  %10 = add i64 %9, %conv
  %11 = icmp ult i64 %10, 225
  call void @llvm.assume(i1 %11)
  %12 = load [225 x double]*, [225 x double]** %A.addr, align 8
  %13 = bitcast [225 x double]* %12 to <225 x double>*
  %14 = load <225 x double>, <225 x double>* %13, align 8
  %matrixext = extractelement <225 x double> %14, i64 %10
  %15 = load i32, i32* %i.addr, align 4
  %conv6 = zext i32 %15 to i64
  %16 = load i32, i32* %j, align 4
  %conv7 = zext i32 %16 to i64
  %17 = mul i64 %conv7, 15
  %18 = add i64 %17, %conv6
  %19 = icmp ult i64 %18, 225
  call void @llvm.assume(i1 %19)
  %20 = load [225 x double]*, [225 x double]** %B.addr, align 8
  %21 = bitcast [225 x double]* %20 to <225 x double>*
  %22 = load <225 x double>, <225 x double>* %21, align 8
  %matrixext8 = extractelement <225 x double> %22, i64 %18
  %mul = fmul double %matrixext, %matrixext8
  %23 = load [225 x double]*, [225 x double]** %B.addr, align 8
  %24 = load i32, i32* %k, align 4
  %conv9 = zext i32 %24 to i64
  %25 = load i32, i32* %j, align 4
  %conv10 = zext i32 %25 to i64
  %26 = mul i64 %conv10, 15
  %27 = add i64 %26, %conv9
  %28 = bitcast [225 x double]* %23 to <225 x double>*
  %29 = icmp ult i64 %27, 225
  call void @llvm.assume(i1 %29)
  %30 = load <225 x double>, <225 x double>* %28, align 8
  %matrixext11 = extractelement <225 x double> %30, i64 %27
  %sub = fsub double %matrixext11, %mul
  %31 = icmp ult i64 %27, 225
  call void @llvm.assume(i1 %31)
  %32 = load <225 x double>, <225 x double>* %28, align 8
  %matins = insertelement <225 x double> %32, double %sub, i64 %27
  store <225 x double> %matins, <225 x double>* %28, align 8
  br label %for.inc

for.inc:                                          ; preds = %for.body4
  %33 = load i32, i32* %k, align 4
  %inc = add i32 %33, 1
  store i32 %inc, i32* %k, align 4
  br label %for.cond1

for.end:                                          ; preds = %for.cond.cleanup3
  br label %for.inc12

for.inc12:                                        ; preds = %for.end
  %34 = load i32, i32* %j, align 4
  %inc13 = add i32 %34, 1
  store i32 %inc13, i32* %j, align 4
  br label %for.cond

for.end14:                                        ; preds = %for.cond.cleanup
  ret void
}

; Function Attrs: argmemonly nofree nosync nounwind willreturn
declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #1

; Function Attrs: inaccessiblememonly nofree nosync nounwind willreturn
declare void @llvm.assume(i1 noundef) #2

; Function Attrs: argmemonly nofree nosync nounwind willreturn
declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #1

; Function Attrs: nounwind ssp uwtable mustprogress