1 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
2 # RUN: llc %s -o - -mtriple=riscv64 -mattr=v \
3 # RUN: -run-pass=riscv-insert-vsetvli | FileCheck %s
; IR module section: source-level functions whose lowered MIR bodies (below)
; are the actual inputs to the riscv-insert-vsetvli pass under test.
6 source_filename = "vsetvli-insert.ll"
7 target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
8 target triple = "riscv64"
; Simple vadd intrinsic with an explicit AVL (%2): baseline single-vsetvli case.
10 define <vscale x 1 x i64> @add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) #0 {
12 %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2)
13 ret <vscale x 1 x i64> %a
; Load feeding an add with the same AVL and SEW: the second op should reuse
; the vl/vtype state set up for the load (no second vsetvli expected).
16 define <vscale x 1 x i64> @load_add(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
18 %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
19 %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
20 ret <vscale x 1 x i64> %b
; e32 load followed by a widening zext to e64 with the same AVL.
23 define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
25 %a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %1)
26 %b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
27 ret <vscale x 1 x i64> %b
30 declare i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64>) #1
; vmv.x.s reads only element 0; it needs vtype but not a meaningful vl.
32 define i64 @vmv_x_s(<vscale x 1 x i64> %0) #0 {
34 %a = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> %0)
; Fixed-length (<2 x i64>) load/add/store: exercises immediate-AVL vsetivli.
38 define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) #0 {
39 %a = load <2 x i64>, <2 x i64>* %x, align 16
40 %b = load <2 x i64>, <2 x i64>* %y, align 16
41 %c = add <2 x i64> %a, %b
42 store <2 x i64> %c, <2 x i64>* %x, align 16
46 declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>) #2
; Fixed-length reduction: mixes different vl/vtype requirements in one block.
48 define i64 @vreduce_add_v2i64(<2 x i64>* %x) #0 {
49 %v = load <2 x i64>, <2 x i64>* %x, align 16
50 %red = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
54 declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) #3
; Explicit user vsetvli intrinsic whose result feeds the vadd's AVL: the pass
; must honor the existing vsetvli rather than insert a redundant one.
56 define <vscale x 1 x i64> @vsetvli_add(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %avl) #0 {
58 %a = call i64 @llvm.riscv.vsetvli.i64(i64 %avl, i64 3, i64 0)
59 %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %a)
60 ret <vscale x 1 x i64> %b
; Same as load_add but with an opaque side-effecting inline asm between the
; two vector ops, which forces the vl/vtype state to be re-established.
63 define <vscale x 1 x i64> @load_add_inlineasm(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i64 %2) #0 {
65 %a = call <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64>* %0, i64 %2)
66 call void asm sideeffect "", ""()
67 %b = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> %a, <vscale x 1 x i64> %1, i64 %2)
68 ret <vscale x 1 x i64> %b
; Body is supplied purely at the MIR level; the IR stub only names the test.
71 define void @vmv_v_i_different_lmuls() {
75 declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
77 declare <vscale x 1 x i64> @llvm.riscv.vle.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
79 declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #4
81 declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
83 attributes #0 = { "target-features"="+v" }
84 attributes #1 = { nounwind readnone }
85 attributes #2 = { nofree nosync nounwind readnone willreturn }
86 attributes #3 = { nounwind }
87 attributes #4 = { nounwind readonly }
# @add: baseline case. A single PseudoVSETVLI (vtype imm 216 = e64, m1, ta, ma)
# must be inserted before the PseudoVADD, and the add's AVL operand is rewritten
# to $noreg because vl was just configured from the same register.
93 tracksRegLiveness: true
95 - { id: 0, class: vr }
96 - { id: 1, class: vr }
97 - { id: 2, class: gprnox0 }
98 - { id: 3, class: vr }
100 - { reg: '$v8', virtual-reg: '%0' }
101 - { reg: '$v9', virtual-reg: '%1' }
102 - { reg: '$x10', virtual-reg: '%2' }
105 machineFunctionInfo: {}
108 liveins: $v8, $v9, $x10
110 ; CHECK-LABEL: name: add
111 ; CHECK: liveins: $v8, $v9, $x10
113 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
114 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
115 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
116 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
117 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
118 ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
119 ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
120 ; CHECK-NEXT: PseudoRET implicit $v8
; Input MIR: the vadd still carries its virtual-register AVL (%2) and has no
; vsetvli; the pass under test is expected to materialize it (see checks above).
121 %2:gprnox0 = COPY $x10
124 %pt:vr = IMPLICIT_DEF
125 %3:vr = PseudoVADD_VV_M1 %pt, %0, %1, %2, 6, 0
127 PseudoRET implicit $v8
# @load_add: vle64 followed by vadd, both with the same AVL (%2) and SEW (e64).
# Only one vsetvli is expected, before the load; the add reuses the vl/vtype
# state, so both instructions end up with a $noreg AVL operand.
133 tracksRegLiveness: true
135 - { id: 0, class: gpr }
136 - { id: 1, class: vr }
137 - { id: 2, class: gprnox0 }
138 - { id: 3, class: vr }
139 - { id: 4, class: vr }
141 - { reg: '$x10', virtual-reg: '%0' }
142 - { reg: '$v8', virtual-reg: '%1' }
143 - { reg: '$x11', virtual-reg: '%2' }
146 machineFunctionInfo: {}
149 liveins: $x10, $v8, $x11
151 ; CHECK-LABEL: name: load_add
152 ; CHECK: liveins: $x10, $v8, $x11
154 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
155 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
156 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
157 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
158 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
159 ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
160 ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
161 ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
162 ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
163 ; CHECK-NEXT: PseudoRET implicit $v8
; Input MIR: both vector ops still carry the %2 AVL; no vsetvli present yet.
164 %2:gprnox0 = COPY $x11
167 %pt:vr = IMPLICIT_DEF
168 %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0
169 %pt2:vr = IMPLICIT_DEF
170 %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0
172 PseudoRET implicit $v8
# @load_zext: e32 vle feeding a widening vzext.vf2 to e64 under the same AVL.
# One vsetvli (e64, m1, ta, ma — imm 216) covers both: the mf2 e32 load and the
# m1 e64 zext are ratio-compatible, so no second vsetvli is expected.
178 tracksRegLiveness: true
180 - { id: 0, class: gpr }
181 - { id: 1, class: gprnox0 }
182 - { id: 2, class: vr }
183 - { id: 3, class: vr }
185 - { reg: '$x10', virtual-reg: '%0' }
186 - { reg: '$x11', virtual-reg: '%1' }
189 machineFunctionInfo: {}
194 ; CHECK-LABEL: name: load_zext
195 ; CHECK: liveins: $x10, $x11
197 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
198 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
199 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
200 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
201 ; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 %pt, [[COPY1]], $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
202 ; CHECK-NEXT: %dead:vr = IMPLICIT_DEF
203 ; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
204 ; CHECK-NEXT: $v8 = COPY %3
205 ; CHECK-NEXT: PseudoRET implicit $v8
; Input MIR: load at SEW=e32 (5), zext at SEW=e64 (6), shared AVL %1.
206 %1:gprnox0 = COPY $x11
208 %pt:vr = IMPLICIT_DEF
209 %2:vr = PseudoVLE32_V_MF2 %pt, %0, %1, 5, 0
210 %dead:vr = IMPLICIT_DEF
211 early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed %2, %1, 6, 0
213 PseudoRET implicit $v8
# @vmv_x_s: vmv.x.s only needs a valid vtype, not a specific vl, so the pass
# inserts a PseudoVSETIVLI with immediate AVL 1 and the instruction depends on
# implicit $vtype only (no implicit $vl operand on the VMV_X_S below).
219 tracksRegLiveness: true
221 - { id: 0, class: vr }
222 - { id: 1, class: gpr }
224 - { reg: '$v8', virtual-reg: '%0' }
227 machineFunctionInfo: {}
232 ; CHECK-LABEL: name: vmv_x_s
233 ; CHECK: liveins: $v8
235 ; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
236 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
237 ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 [[COPY]], 6 /* e64 */, implicit $vtype
238 ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
239 ; CHECK-NEXT: PseudoRET implicit $x10
; Input MIR: bare vmv.x.s at SEW=e64 with no vl/vtype setup.
241 %1:gpr = PseudoVMV_X_S_M1 %0, 6
243 PseudoRET implicit $x10
# @add_v2i64: fixed-length <2 x i64> load/load/add/store, all with immediate
# AVL 2 and SEW e64. A single PseudoVSETIVLI (imm AVL 2, vtype 216) is hoisted
# above the first load and shared by all four vector instructions.
249 tracksRegLiveness: true
251 - { id: 0, class: gpr }
252 - { id: 1, class: gpr }
253 - { id: 2, class: vr }
254 - { id: 3, class: vr }
255 - { id: 4, class: vr }
257 - { reg: '$x10', virtual-reg: '%0' }
258 - { reg: '$x11', virtual-reg: '%1' }
261 machineFunctionInfo: {}
266 ; CHECK-LABEL: name: add_v2i64
267 ; CHECK: liveins: $x10, $x11
269 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x11
270 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
271 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
272 ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
273 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
274 ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
275 ; CHECK-NEXT: [[PseudoVLE64_V_M1_1:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt2, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.y)
276 ; CHECK-NEXT: %pt3:vr = IMPLICIT_DEF
277 ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt3, killed [[PseudoVLE64_V_M1_]], killed [[PseudoVLE64_V_M1_1]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
278 ; CHECK-NEXT: PseudoVSE64_V_M1 killed [[PseudoVADD_VV_M1_]], [[COPY1]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (store (s128) into %ir.x)
279 ; CHECK-NEXT: PseudoRET
; Input MIR: every op already uses immediate AVL 2; only the vsetivli is added.
282 %pt:vr = IMPLICIT_DEF
283 %pt2:vr = IMPLICIT_DEF
284 %2:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
285 %3:vr = PseudoVLE64_V_M1 %pt2, %1, 2, 6, 0 :: (load (s128) from %ir.y)
286 %pt3:vr = IMPLICIT_DEF
287 %4:vr = PseudoVADD_VV_M1 %pt3, killed %2, killed %3, 2, 6, 0
288 PseudoVSE64_V_M1 killed %4, %0, 2, 6 :: (store (s128) into %ir.x)
# @vreduce_add_v2i64: load (AVL 2) -> vmv.v.i splat of the identity element ->
# vredsum -> vmv.x.s. The splat's policy differs (tu: vtype imm 152 via
# PseudoVSETVLIX0 preserving vl), so the pass must switch state twice:
# once for the splat and back to AVL 2 / ta for the reduction.
293 name: vreduce_add_v2i64
295 tracksRegLiveness: true
297 - { id: 0, class: gpr }
298 - { id: 1, class: vr }
299 - { id: 2, class: vr }
300 - { id: 3, class: vr }
301 - { id: 4, class: vr }
302 - { id: 5, class: gpr }
304 - { reg: '$x10', virtual-reg: '%0' }
307 machineFunctionInfo: {}
312 ; CHECK-LABEL: name: vreduce_add_v2i64
313 ; CHECK: liveins: $x10
315 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
316 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
317 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
318 ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY]], 2, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
319 ; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 152 /* e64, m1, tu, ma */, implicit-def $vl, implicit-def $vtype
320 ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
321 ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
322 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
323 ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
324 ; CHECK-NEXT: [[PseudoVMV_X_S_M1_:%[0-9]+]]:gpr = PseudoVMV_X_S_M1 killed [[PseudoVREDSUM_VS_M1_E8_]], 6 /* e64 */, implicit $vtype
325 ; CHECK-NEXT: $x10 = COPY [[PseudoVMV_X_S_M1_]]
326 ; CHECK-NEXT: PseudoRET implicit $x10
; Input MIR. NOTE(review): %4 is used below but its defining IMPLICIT_DEF line
; is elided from this excerpt — the [[DEF]] check line above implies it exists.
328 %pt:vr = IMPLICIT_DEF
329 %1:vr = PseudoVLE64_V_M1 %pt, %0, 2, 6, 0 :: (load (s128) from %ir.x)
330 %2:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6, 0
332 %3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1
333 %5:gpr = PseudoVMV_X_S_M1 killed %3, 6
335 PseudoRET implicit $x10
# @vsetvli_add: the input already contains a user PseudoVSETVLI (vtype imm 88 =
# e64, m1, ta, mu) producing the AVL for the vadd. The pass must keep that
# vsetvli (its result %3 is live into the add as $noreg after rewriting) and
# must NOT insert a second one.
341 tracksRegLiveness: true
343 - { id: 0, class: vr }
344 - { id: 1, class: vr }
345 - { id: 2, class: gprnox0 }
346 - { id: 3, class: gprnox0 }
347 - { id: 4, class: vr }
349 - { reg: '$v8', virtual-reg: '%0' }
350 - { reg: '$v9', virtual-reg: '%1' }
351 - { reg: '$x10', virtual-reg: '%2' }
354 machineFunctionInfo: {}
357 liveins: $v8, $v9, $x10
359 ; CHECK-LABEL: name: vsetvli_add
360 ; CHECK: liveins: $v8, $v9, $x10
362 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x10
363 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v9
364 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY $v8
365 ; CHECK-NEXT: [[PseudoVSETVLI:%[0-9]+]]:gprnox0 = PseudoVSETVLI [[COPY]], 88 /* e64, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
366 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
367 ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt, [[COPY2]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
368 ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
369 ; CHECK-NEXT: PseudoRET implicit $v8
; Input MIR: the explicit vsetvli's dead vl/vtype defs become live once the
; following vadd is tied to them (checks above drop the 'dead' markers).
370 %2:gprnox0 = COPY $x10
373 %3:gprnox0 = PseudoVSETVLI %2, 88, implicit-def dead $vl, implicit-def dead $vtype
374 %pt:vr = IMPLICIT_DEF
375 %4:vr = PseudoVADD_VV_M1 %pt, %0, %1, killed %3, 6, 0
377 PseudoRET implicit $v8
# @load_add_inlineasm: same as load_add but a side-effecting INLINEASM sits
# between the load and the add. The asm may clobber vl/vtype, so the pass must
# emit a second, otherwise-redundant PseudoVSETVLI after it (two imm-216
# vsetvlis in the checks below, unlike load_add's single one).
381 name: load_add_inlineasm
383 tracksRegLiveness: true
385 - { id: 0, class: gpr }
386 - { id: 1, class: vr }
387 - { id: 2, class: gprnox0 }
388 - { id: 3, class: vr }
389 - { id: 4, class: vr }
391 - { reg: '$x10', virtual-reg: '%0' }
392 - { reg: '$v8', virtual-reg: '%1' }
393 - { reg: '$x11', virtual-reg: '%2' }
396 machineFunctionInfo: {}
399 liveins: $x10, $v8, $x11
401 ; CHECK-LABEL: name: load_add_inlineasm
402 ; CHECK: liveins: $x10, $v8, $x11
404 ; CHECK-NEXT: [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
405 ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vr = COPY $v8
406 ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x10
407 ; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
408 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
409 ; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 %pt, [[COPY2]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
410 ; CHECK-NEXT: INLINEASM &"", 1 /* sideeffect attdialect */
411 ; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
412 ; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
413 ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 %pt2, killed [[PseudoVLE64_V_M1_]], [[COPY1]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
414 ; CHECK-NEXT: $v8 = COPY [[PseudoVADD_VV_M1_]]
415 ; CHECK-NEXT: PseudoRET implicit $v8
; Input MIR: identical to load_add except for the INLINEASM barrier.
416 %2:gprnox0 = COPY $x11
419 %pt:vr = IMPLICIT_DEF
420 %3:vr = PseudoVLE64_V_M1 %pt, %0, %2, 6, 0
421 INLINEASM &"", 1 /* sideeffect attdialect */
422 %pt2:vr = IMPLICIT_DEF
423 %4:vr = PseudoVADD_VV_M1 %pt2, killed %3, %1, %2, 6, 0
425 PseudoRET implicit $v8
# @vmv_v_i_different_lmuls: vid.v at e64/m2 followed by vmv.v.i at e8/mf4 with
# the same AVL (4). The checks expect the second transition to use
# PseudoVSETVLIX0 (vtype imm 134 = e8, mf4, tu, ma) preserving the current vl
# rather than a full vsetivli, since only the SEW/LMUL ratio changes.
429 name: vmv_v_i_different_lmuls
430 tracksRegLiveness: true
433 liveins: $x10, $v8, $x11
435 ; CHECK-LABEL: name: vmv_v_i_different_lmuls
436 ; CHECK: liveins: $x10, $v8, $x11
438 ; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
439 ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 217 /* e64, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
440 ; CHECK-NEXT: [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 %pt, 4, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
441 ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 134 /* e8, mf4, tu, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
442 ; CHECK-NEXT: [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 undef [[PseudoVMV_V_I_MF4_]], 0, 4, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
443 ; CHECK-NEXT: PseudoRET
; Input MIR: vid at SEW=e64 (6), splat at SEW=e8 (3), both with imm AVL 4.
444 %pt:vrm2 = IMPLICIT_DEF
445 %0:vrm2 = PseudoVID_V_M2 %pt, 4, 6, 3
446 %4:vr = PseudoVMV_V_I_MF4 undef %4, 0, 4, 3, 0