# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-fast -verify-machineinstrs -o - %s | FileCheck %s
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=regbankselect -regbankselect-greedy -verify-machineinstrs -o - %s | FileCheck %s

# Generate the 3-operand vector bitfield extract instructions for 32-bit
# operations only.
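#
# As a rough reminder (not itself checked by this test), for
#   %dst:_(sN) = G_SBFX %src, %offset(s32), %width
# the operation extracts %width bits of %src starting at bit %offset and
# sign-extends the result, i.e. approximately
#   %dst = sext(%src[%offset + %width - 1 : %offset]).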
---
name:            test_sbfx_s32_vvv
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2

    ; CHECK-LABEL: name: test_sbfx_s32_vvv
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY]], [[COPY1]](s32), [[COPY2]]
    ; CHECK: $vgpr0 = COPY [[SBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = G_SBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...

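# Constant offset and width are materialized on the SGPR bank and then copied
# to VGPRs to satisfy the VALU operand constraints.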
---
name:            test_sbfx_s32_vii
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0

    ; CHECK-LABEL: name: test_sbfx_s32_vii
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
    ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY]], [[COPY1]](s32), [[COPY2]]
    ; CHECK: $vgpr0 = COPY [[SBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = G_CONSTANT i32 10
    %2:_(s32) = G_CONSTANT i32 4
    %3:_(s32) = G_SBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...

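# SGPR offset and width operands are likewise copied to VGPRs before feeding
# the vector instruction.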
---
name:            test_sbfx_s32_vss
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0, $sgpr0, $sgpr1

    ; CHECK-LABEL: name: test_sbfx_s32_vss
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY]], [[COPY3]](s32), [[COPY4]]
    ; CHECK: $vgpr0 = COPY [[SBFX]](s32)
    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $sgpr0
    %2:_(s32) = COPY $sgpr1
    %3:_(s32) = G_SBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...

# Expand to a sequence that implements the 64-bit bitfield extract using
# shifts, since there is no 64-bit vector bitfield extract instruction.
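#
# In effect (a sketch of the expansion below, not itself checked):
#   %3 = ((%0 a>> %1) << (64 - %2)) a>> (64 - %2)
# where a>> is an arithmetic shift right.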
---
name:            test_sbfx_s64_vvv
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr2, $vgpr3

    ; CHECK-LABEL: name: test_sbfx_s64_vvv
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr2
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr3
    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
    ; CHECK: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 64
    ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[C]], [[COPY2]]
    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[ASHR]], [[SUB]](s32)
    ; CHECK: [[ASHR1:%[0-9]+]]:vgpr(s64) = G_ASHR [[SHL]], [[SUB]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY %3:vgpr(s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = COPY $vgpr2
    %2:_(s32) = COPY $vgpr3
    %3:_(s64) = G_SBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...

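# The same shift expansion as above, with the SGPR offset and width first
# copied to VGPRs.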
---
name:            test_sbfx_s64_vss
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $sgpr0, $sgpr1

    ; CHECK-LABEL: name: test_sbfx_s64_vss
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32)
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr(s32) = COPY [[COPY2]](s32)
    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY3]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
    ; CHECK: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 64
    ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[C]], [[COPY4]]
    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[ASHR]], [[SUB]](s32)
    ; CHECK: [[ASHR1:%[0-9]+]]:vgpr(s64) = G_ASHR [[SHL]], [[SUB]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY %3:vgpr(s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = COPY $sgpr0
    %2:_(s32) = COPY $sgpr1
    %3:_(s64) = G_SBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...

# If the offset and width are constants, use the 32-bit bitfield extract,
# and merge to create a 64-bit result.
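#
# For a constant width <= 32 (a sketch, not itself checked):
#   lo = sbfx(lo_half(src a>> offset), 0, width)
#   hi = lo a>> 31
#   dst = merge(lo, hi)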
---
name:            test_sbfx_s64_vii_small
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_sbfx_s64_vii_small
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 31
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 4
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
    ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[UV]], [[C2]](s32), [[COPY2]]
    ; CHECK: [[C3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 31
    ; CHECK: [[ASHR1:%[0-9]+]]:vgpr(s32) = G_ASHR [[SBFX]], [[C3]](s32)
    ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[SBFX]](s32), [[ASHR1]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 31
    %2:_(s32) = G_CONSTANT i32 4
    %3:_(s64) = G_SBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...

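# With a constant width greater than 32, the extract is done on the high half
# with width - 32, and the low half of the shifted source is reused directly.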
---
name:            test_sbfx_s64_vii_big
legalized:       true

body: |
  bb.0:
    liveins: $vgpr0_vgpr1

    ; CHECK-LABEL: name: test_sbfx_s64_vii_big
    ; CHECK: [[COPY:%[0-9]+]]:vgpr(s64) = COPY $vgpr0_vgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 8
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 40
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
    ; CHECK: [[C2:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 0
    ; CHECK: [[C3:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 8
    ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[UV1]], [[C2]](s32), [[C3]]
    ; CHECK: [[MV:%[0-9]+]]:vgpr(s64) = G_MERGE_VALUES [[UV]](s32), [[SBFX]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY [[MV]](s64)
    %0:_(s64) = COPY $vgpr0_vgpr1
    %1:_(s32) = G_CONSTANT i32 8
    %2:_(s32) = G_CONSTANT i32 40
    %3:_(s64) = G_SBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...

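# An SGPR source operand is copied to a VGPR before the same shift-based
# expansion.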
---
name:            test_sbfx_s64_svv
legalized:       true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1, $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_sbfx_s64_svv
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s64) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s64) = COPY [[COPY]](s64)
    ; CHECK: [[ASHR:%[0-9]+]]:vgpr(s64) = G_ASHR [[COPY3]], [[COPY1]](s32)
    ; CHECK: [[UV:%[0-9]+]]:vgpr(s32), [[UV1:%[0-9]+]]:vgpr(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
    ; CHECK: [[C:%[0-9]+]]:vgpr(s32) = G_CONSTANT i32 64
    ; CHECK: [[SUB:%[0-9]+]]:vgpr(s32) = G_SUB [[C]], [[COPY2]]
    ; CHECK: [[SHL:%[0-9]+]]:vgpr(s64) = G_SHL [[ASHR]], [[SUB]](s32)
    ; CHECK: [[ASHR1:%[0-9]+]]:vgpr(s64) = G_ASHR [[SHL]], [[SUB]](s32)
    ; CHECK: $vgpr0_vgpr1 = COPY %3:vgpr(s64)
    %0:_(s64) = COPY $sgpr0_sgpr1
    %1:_(s32) = COPY $vgpr0
    %2:_(s32) = COPY $vgpr1
    %3:_(s64) = G_SBFX %0, %1(s32), %2
    $vgpr0_vgpr1 = COPY %3(s64)
...

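# For a 32-bit scalar source with vector offset and width, the source is
# copied to a VGPR and the 3-operand vector form is still used.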
---
name:            test_sbfx_s32_svv
legalized:       true

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0, $vgpr1

    ; CHECK-LABEL: name: test_sbfx_s32_svv
    ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY $vgpr1
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY]](s32)
    ; CHECK: [[SBFX:%[0-9]+]]:vgpr(s32) = G_SBFX [[COPY3]], [[COPY1]](s32), [[COPY2]]
    ; CHECK: $vgpr0 = COPY [[SBFX]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $vgpr0
    %2:_(s32) = COPY $vgpr1
    %3:_(s32) = G_SBFX %0, %1(s32), %2
    $vgpr0 = COPY %3(s32)
...

# Expand to a sequence that combines the offset and width for the two-operand
# version of the 32-bit scalar instruction.
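#
# The packed operand is roughly (a sketch, not itself checked):
#   packed = (offset & 63) | (width << 16)
# matching the S_BFE encoding, which reads the offset from bits [5:0] and the
# width from bits [22:16] of its second operand.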
---
name:            test_sbfx_s32_sss
legalized:       true

body: |
  bb.0:
    liveins: $sgpr0, $sgpr1, $sgpr2

    ; CHECK-LABEL: name: test_sbfx_s32_sss
    ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_I32_:%[0-9]+]]:sreg_32(s32) = S_BFE_I32 [[COPY]](s32), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0 = COPY [[S_BFE_I32_]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $sgpr1
    %2:_(s32) = COPY $sgpr2
    %3:_(s32) = G_SBFX %0, %1(s32), %2
    $sgpr0 = COPY %3(s32)
...

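# With constant operands the same mask/shift/or packing is still emitted at
# this stage; presumably later combines can fold it into an immediate.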
---
name:            test_sbfx_s32_sii
legalized:       true

body: |
  bb.0:
    liveins: $sgpr0

    ; CHECK-LABEL: name: test_sbfx_s32_sii
    ; CHECK: [[COPY:%[0-9]+]]:sreg_32(s32) = COPY $sgpr0
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10
    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_I32_:%[0-9]+]]:sreg_32(s32) = S_BFE_I32 [[COPY]](s32), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0 = COPY [[S_BFE_I32_]](s32)
    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = G_CONSTANT i32 1
    %2:_(s32) = G_CONSTANT i32 10
    %3:_(s32) = G_SBFX %0, %1(s32), %2
    $sgpr0 = COPY %3(s32)
...

# Expand to a sequence that combines the offset and width for the two-operand
# version of the 64-bit scalar instruction.
---
name:            test_sbfx_s64_sss
legalized:       true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1, $sgpr0, $sgpr1

    ; CHECK-LABEL: name: test_sbfx_s64_sss
    ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1
    ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0
    ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[COPY1]], [[C]]
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY2]], [[C1]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_I64_:%[0-9]+]]:sreg_64(s64) = S_BFE_I64 [[COPY]](s64), [[OR]](s32), implicit-def $scc
    ; CHECK: $sgpr0_sgpr1 = COPY [[S_BFE_I64_]](s64)
    %0:_(s64) = COPY $sgpr0_sgpr1
    %1:_(s32) = COPY $sgpr0
    %2:_(s32) = COPY $sgpr1
    %3:_(s64) = G_SBFX %0, %1(s32), %2
    $sgpr0_sgpr1 = COPY %3(s64)
...

---
name:            test_sbfx_s64_sii
legalized:       true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1

    ; CHECK-LABEL: name: test_sbfx_s64_sii
    ; CHECK: [[COPY:%[0-9]+]]:sreg_64(s64) = COPY $sgpr0_sgpr1
    ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 1
    ; CHECK: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 10
    ; CHECK: [[C2:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 63
    ; CHECK: [[AND:%[0-9]+]]:sgpr(s32) = G_AND [[C]], [[C2]]
    ; CHECK: [[C3:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 16
    ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[C1]], [[C3]](s32)
    ; CHECK: [[OR:%[0-9]+]]:sreg_32(s32) = G_OR [[AND]], [[SHL]]
    ; CHECK: [[S_BFE_I64_:%[0-9]+]]:sreg_64(s64) = S_BFE_I64 [[COPY]](s64), [[OR]](s32), implicit-def $scc
    %0:_(s64) = COPY $sgpr0_sgpr1
    %1:_(s32) = G_CONSTANT i32 1
    %2:_(s32) = G_CONSTANT i32 10
    %3:_(s64) = G_SBFX %0, %1(s32), %2
...