; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt --codegen-opt-level=2 -mtriple=x86_64 -lower-amx-type %s -S | FileCheck %s
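
; Two chained phis of a <256 x i32> tile vector with an undef incoming value
; from %entry: both phis are rewritten to x86_amx, the undef operand is
; materialized with tilezero, and the tile<->vector cast pair is elided.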
define void @undef_2phi(ptr%buf) {
; CHECK-LABEL: @undef_2phi(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[L3:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[TMP1:%.*]] = phi x86_amx [ [[TMP0]], [[ENTRY:%.*]] ], [ [[T1]], [[L1]] ]
; CHECK-NEXT:    br i1 undef, label [[L3]], label [[EXIT:%.*]]
; CHECK:       l3:
; CHECK-NEXT:    [[TMP2:%.*]] = phi x86_amx [ [[TMP1]], [[L2]] ], [ [[T1]], [[L1]] ]
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[BUF:%.*]], i64 1024, x86_amx [[TMP2]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %l3

l2:
  %t3 = phi <256 x i32> [ undef, %entry ], [ %t2, %l1 ]
  br i1 undef, label %l3, label %exit

l3:
  %t4 = phi <256 x i32> [ %t3, %l2 ], [ %t2, %l1 ]
  %t5 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t4)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr %buf, i64 1024, x86_amx %t5)
  br label %exit

exit:
  ret void
}
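
; Single phi with an undef incoming value: the undef is materialized as a
; tilezero in the entry block and the phi is carried as x86_amx, so no stack
; round trip is needed.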
define void @foo_undef(ptr%buf) {
; CHECK-LABEL: @foo_undef(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[TMP1:%.*]] = phi x86_amx [ [[TMP0]], [[ENTRY:%.*]] ], [ [[T1]], [[L1]] ]
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[BUF:%.*]], i64 1024, x86_amx [[TMP1]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %exit

l2:
  %t3 = phi <256 x i32> [ undef, %entry ], [ %t2, %l1 ]
  %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr %buf, i64 1024, x86_amx %t4)
  br label %exit

exit:
  ret void
}
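
; Same as @foo_undef, but the incoming value is zeroinitializer; it is
; likewise lowered to tilezero.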
define void @foo_zero(ptr%buf) {
; CHECK-LABEL: @foo_zero(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[TMP1:%.*]] = phi x86_amx [ [[TMP0]], [[ENTRY:%.*]] ], [ [[T1]], [[L1]] ]
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[BUF:%.*]], i64 1024, x86_amx [[TMP1]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %exit

l2:
  %t3 = phi <256 x i32> [ zeroinitializer, %entry ], [ %t2, %l1 ]
  %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr %buf, i64 1024, x86_amx %t4)
  br label %exit

exit:
  ret void
}
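
; The row operand is a runtime value, so the phi is not rewritten to
; x86_amx: the tile is instead spilled through <256 x i32> stack slots with
; tilestored64/tileloadd64 around the phi.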
define void @foo_vrow(ptr%buf, i16 %row) {
; CHECK-LABEL: @foo_vrow(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 [[ROW:%.*]], i16 32)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 32, ptr [[TMP1]], i64 32, x86_amx [[T1]])
; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, ptr [[TMP1]], align 1024
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[T3:%.*]] = phi <256 x i32> [ undef, [[ENTRY:%.*]] ], [ [[TMP3]], [[L1]] ]
; CHECK-NEXT:    store <256 x i32> [[T3]], ptr [[TMP0]], align 1024
; CHECK-NEXT:    [[TMP5:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 [[ROW]], i16 32, ptr [[TMP0]], i64 32)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 [[ROW]], i16 32, ptr [[BUF:%.*]], i64 1024, x86_amx [[TMP5]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 %row, i16 32)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %exit

l2:
  %t3 = phi <256 x i32> [ undef, %entry ], [ %t2, %l1 ]
  %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
  call void @llvm.x86.tilestored64.internal(i16 %row, i16 32, ptr %buf, i64 1024, x86_amx %t4)
  br label %exit

exit:
  ret void
}
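
; Variable-column variant: the column operand is sign-extended to i64 to form
; the stride for the stack-slot store and reload.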
define void @foo_vcol(ptr%buf, i16 %col) {
; CHECK-LABEL: @foo_vcol(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 [[COL:%.*]])
; CHECK-NEXT:    [[TMP3:%.*]] = sext i16 [[COL]] to i64
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 [[COL]], ptr [[TMP1]], i64 [[TMP3]], x86_amx [[T1]])
; CHECK-NEXT:    [[TMP4:%.*]] = load <256 x i32>, ptr [[TMP1]], align 1024
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[T3:%.*]] = phi <256 x i32> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP4]], [[L1]] ]
; CHECK-NEXT:    store <256 x i32> [[T3]], ptr [[TMP0]], align 1024
; CHECK-NEXT:    [[TMP6:%.*]] = sext i16 [[COL]] to i64
; CHECK-NEXT:    [[TMP7:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 [[COL]], ptr [[TMP0]], i64 [[TMP6]])
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 [[COL]], ptr [[BUF:%.*]], i64 1024, x86_amx [[TMP7]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 %col)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %exit

l2:
  %t3 = phi <256 x i32> [ zeroinitializer, %entry ], [ %t2, %l1 ]
  %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
  call void @llvm.x86.tilestored64.internal(i16 8, i16 %col, ptr %buf, i64 1024, x86_amx %t4)
  br label %exit

exit:
  ret void
}
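
; The phi result only feeds a plain vector store, so no tile shape is needed
; after the phi: the back-to-back casts cancel and the store operates on the
; vector phi directly.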
define void @noshape(ptr%buf) {
; CHECK-LABEL: @noshape(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[TMP0]], i64 32, x86_amx [[T1]])
; CHECK-NEXT:    [[TMP2:%.*]] = load <256 x i32>, ptr [[TMP0]], align 1024
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[T3:%.*]] = phi <256 x i32> [ undef, [[ENTRY:%.*]] ], [ [[TMP2]], [[L1]] ]
; CHECK-NEXT:    store <256 x i32> [[T3]], ptr [[BUF:%.*]], align 1024
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %exit

l2:
  %t3 = phi <256 x i32> [ undef, %entry ], [ %t2, %l1 ]
  %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
  %t5 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t4)
  store <256 x i32> %t5, ptr %buf
  br label %exit

exit:
  ret void
}
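
; Like @noshape, but the vector is passed through llvm.abs before the store;
; the cast pair is folded and llvm.abs consumes the phi directly.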
define void @noshape2(ptr%buf) {
; CHECK-LABEL: @noshape2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = alloca <256 x i32>, align 64
; CHECK-NEXT:    br i1 undef, label [[L1:%.*]], label [[L2:%.*]]
; CHECK:       l1:
; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 8, i16 32, ptr [[TMP0]], i64 32, x86_amx [[T1]])
; CHECK-NEXT:    [[TMP2:%.*]] = load <256 x i32>, ptr [[TMP0]], align 1024
; CHECK-NEXT:    br i1 undef, label [[L2]], label [[EXIT:%.*]]
; CHECK:       l2:
; CHECK-NEXT:    [[T3:%.*]] = phi <256 x i32> [ undef, [[ENTRY:%.*]] ], [ [[TMP2]], [[L1]] ]
; CHECK-NEXT:    [[T6:%.*]] = call <256 x i32> @llvm.abs.v256i32(<256 x i32> [[T3]], i1 true)
; CHECK-NEXT:    store <256 x i32> [[T6]], ptr [[BUF:%.*]], align 1024
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  br i1 undef, label %l1, label %l2

l1:
  %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 8, i16 32)
  %t2 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t1)
  br i1 undef, label %l2, label %exit

l2:
  %t3 = phi <256 x i32> [ undef, %entry ], [ %t2, %l1 ]
  %t4 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %t3)
  %t5 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %t4)
  %t6 = call <256 x i32> @llvm.abs.v256i32(<256 x i32> %t5, i1 1)
  store <256 x i32> %t6, ptr %buf
  br label %exit

exit:
  ret void
}

declare <256 x i32> @llvm.abs.v256i32(<256 x i32>, i1)
declare x86_amx @llvm.x86.tilezero.internal(i16, i16)
declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, ptr, i64)
declare <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx)
declare x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32>)
declare void @llvm.x86.tilestored64.internal(i16, i16, ptr, i64, x86_amx)
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)