; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32,ZFMIN %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfbfmin,+zvfh,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64,ZFMIN %s
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV32,NOZFMIN %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfhmin,+zvfbfmin -verify-machineinstrs | FileCheck --check-prefixes=CHECK,RV64,NOZFMIN %s
7 define i64 @i64(<vscale x 1 x i64> %v, i1 %c) {
; Extract element 0 of an i64 vector that was spilled around the inline asm
; (which clobbers all 32 vector registers, forcing the spill of %v).
; On RV64 the reload is folded into a scalar ld from the spill slot; on
; RV32 an i64 element cannot be loaded into one GPR, so the whole register
; is reloaded (vl1r.v) and the value is split across a0/a1 with
; vsrl.vx + vmv.x.s.
; NOTE(review): this copy of the test appears garbled -- each line has a
; stray leading number fused into it and several lines are elided (the
; #APP/#NO_APP markers, the truebb/falsebb IR labels, the ret
; instructions, and the closing brace).  Regenerate the assertions with
; utils/update_llc_test_checks.py rather than repairing them by hand.
10 ; RV32-NEXT: addi sp, sp, -16
11 ; RV32-NEXT: .cfi_def_cfa_offset 16
12 ; RV32-NEXT: csrr a1, vlenb
13 ; RV32-NEXT: sub sp, sp, a1
14 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
15 ; RV32-NEXT: addi a1, sp, 16
16 ; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
17 ; RV32-NEXT: andi a0, a0, 1
20 ; RV32-NEXT: beqz a0, .LBB0_2
21 ; RV32-NEXT: # %bb.1: # %truebb
22 ; RV32-NEXT: li a0, 32
23 ; RV32-NEXT: vl1r.v v9, (a1) # Unknown-size Folded Reload
24 ; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
25 ; RV32-NEXT: vsrl.vx v8, v9, a0
26 ; RV32-NEXT: vmv.x.s a1, v8
27 ; RV32-NEXT: vmv.x.s a0, v9
28 ; RV32-NEXT: j .LBB0_3
29 ; RV32-NEXT: .LBB0_2: # %falsebb
31 ; RV32-NEXT: .LBB0_3: # %falsebb
32 ; RV32-NEXT: csrr a2, vlenb
33 ; RV32-NEXT: add sp, sp, a2
34 ; RV32-NEXT: .cfi_def_cfa sp, 16
35 ; RV32-NEXT: addi sp, sp, 16
36 ; RV32-NEXT: .cfi_def_cfa_offset 0
41 ; RV64-NEXT: addi sp, sp, -16
42 ; RV64-NEXT: .cfi_def_cfa_offset 16
43 ; RV64-NEXT: csrr a1, vlenb
44 ; RV64-NEXT: sub sp, sp, a1
45 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
46 ; RV64-NEXT: addi a1, sp, 16
47 ; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
48 ; RV64-NEXT: andi a0, a0, 1
51 ; RV64-NEXT: beqz a0, .LBB0_2
52 ; RV64-NEXT: # %bb.1: # %truebb
53 ; RV64-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
54 ; RV64-NEXT: .LBB0_2: # %falsebb
55 ; RV64-NEXT: csrr a1, vlenb
56 ; RV64-NEXT: add sp, sp, a1
57 ; RV64-NEXT: .cfi_def_cfa sp, 16
58 ; RV64-NEXT: addi sp, sp, 16
59 ; RV64-NEXT: .cfi_def_cfa_offset 0
61 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
62 br i1 %c, label %truebb, label %falsebb
64 %x = extractelement <vscale x 1 x i64> %v, i32 0
70 define i32 @i32(<vscale x 2 x i32> %v, i1 %c) {
; Extract element 0 of an i32 vector spilled around the all-vreg-clobbering
; inline asm: the reload is folded into a scalar lw straight from the spill
; slot instead of a whole-register vector reload.
; NOTE(review): this copy of the test appears garbled -- stray leading
; numbers are fused into every line and several lines are elided
; (#APP/#NO_APP, the truebb/falsebb IR labels, the ret instructions, the
; closing brace).  Regenerate with utils/update_llc_test_checks.py.
73 ; CHECK-NEXT: addi sp, sp, -16
74 ; CHECK-NEXT: .cfi_def_cfa_offset 16
75 ; CHECK-NEXT: csrr a1, vlenb
76 ; CHECK-NEXT: sub sp, sp, a1
77 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
78 ; CHECK-NEXT: addi a1, sp, 16
79 ; CHECK-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
80 ; CHECK-NEXT: andi a0, a0, 1
83 ; CHECK-NEXT: beqz a0, .LBB1_2
84 ; CHECK-NEXT: # %bb.1: # %truebb
85 ; CHECK-NEXT: lw a0, 16(sp) # 8-byte Folded Reload
86 ; CHECK-NEXT: .LBB1_2: # %falsebb
87 ; CHECK-NEXT: csrr a1, vlenb
88 ; CHECK-NEXT: add sp, sp, a1
89 ; CHECK-NEXT: .cfi_def_cfa sp, 16
90 ; CHECK-NEXT: addi sp, sp, 16
91 ; CHECK-NEXT: .cfi_def_cfa_offset 0
93 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
94 br i1 %c, label %truebb, label %falsebb
96 %x = extractelement <vscale x 2 x i32> %v, i32 0
102 define i16 @i16(<vscale x 4 x i16> %v, i1 %c) {
; Extract element 0 of an i16 vector spilled around the all-vreg-clobbering
; inline asm: the reload is folded into a scalar lh from the spill slot.
; NOTE(review): this copy of the test appears garbled -- stray leading
; numbers are fused into every line and several lines are elided (the #APP
; marker, the truebb/falsebb IR labels, the ret instructions, the closing
; brace).  Regenerate with utils/update_llc_test_checks.py.
105 ; CHECK-NEXT: addi sp, sp, -16
106 ; CHECK-NEXT: .cfi_def_cfa_offset 16
107 ; CHECK-NEXT: csrr a1, vlenb
108 ; CHECK-NEXT: sub sp, sp, a1
109 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
110 ; CHECK-NEXT: addi a1, sp, 16
111 ; CHECK-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
112 ; CHECK-NEXT: andi a0, a0, 1
114 ; CHECK-NEXT: #NO_APP
115 ; CHECK-NEXT: beqz a0, .LBB2_2
116 ; CHECK-NEXT: # %bb.1: # %truebb
117 ; CHECK-NEXT: lh a0, 16(sp) # 8-byte Folded Reload
118 ; CHECK-NEXT: .LBB2_2: # %falsebb
119 ; CHECK-NEXT: csrr a1, vlenb
120 ; CHECK-NEXT: add sp, sp, a1
121 ; CHECK-NEXT: .cfi_def_cfa sp, 16
122 ; CHECK-NEXT: addi sp, sp, 16
123 ; CHECK-NEXT: .cfi_def_cfa_offset 0
125 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
126 br i1 %c, label %truebb, label %falsebb
128 %x = extractelement <vscale x 4 x i16> %v, i32 0
134 define i8 @i8(<vscale x 8 x i8> %v, i1 %c) {
; Extract element 0 of an i8 vector spilled around the all-vreg-clobbering
; inline asm: the reload is folded into a scalar lb from the spill slot.
; NOTE(review): this copy of the test appears garbled -- stray leading
; numbers are fused into every line and several lines are elided (the #APP
; marker, the truebb/falsebb IR labels, the ret instructions, the closing
; brace).  Regenerate with utils/update_llc_test_checks.py.
137 ; CHECK-NEXT: addi sp, sp, -16
138 ; CHECK-NEXT: .cfi_def_cfa_offset 16
139 ; CHECK-NEXT: csrr a1, vlenb
140 ; CHECK-NEXT: sub sp, sp, a1
141 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
142 ; CHECK-NEXT: addi a1, sp, 16
143 ; CHECK-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
144 ; CHECK-NEXT: andi a0, a0, 1
146 ; CHECK-NEXT: #NO_APP
147 ; CHECK-NEXT: beqz a0, .LBB3_2
148 ; CHECK-NEXT: # %bb.1: # %truebb
149 ; CHECK-NEXT: lb a0, 16(sp) # 8-byte Folded Reload
150 ; CHECK-NEXT: .LBB3_2: # %falsebb
151 ; CHECK-NEXT: csrr a1, vlenb
152 ; CHECK-NEXT: add sp, sp, a1
153 ; CHECK-NEXT: .cfi_def_cfa sp, 16
154 ; CHECK-NEXT: addi sp, sp, 16
155 ; CHECK-NEXT: .cfi_def_cfa_offset 0
157 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
158 br i1 %c, label %truebb, label %falsebb
160 %x = extractelement <vscale x 8 x i8> %v, i32 0
166 define double @f64(<vscale x 1 x double> %v, i1 %c) {
; Extract element 0 of an f64 vector spilled around the all-vreg-clobbering
; inline asm: on both targets the reload is folded into a scalar fld from
; the spill slot.  The false path materializes +0.0 differently per target
; (fcvt.d.w from zero on RV32, fmv.d.x from zero on RV64).
; NOTE(review): this copy of the test appears garbled -- stray leading
; numbers are fused into every line and several lines are elided
; (#APP/#NO_APP, the truebb/falsebb IR labels, the ret instructions, the
; closing brace).  Regenerate with utils/update_llc_test_checks.py.
169 ; RV32-NEXT: addi sp, sp, -16
170 ; RV32-NEXT: .cfi_def_cfa_offset 16
171 ; RV32-NEXT: csrr a1, vlenb
172 ; RV32-NEXT: sub sp, sp, a1
173 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
174 ; RV32-NEXT: addi a1, sp, 16
175 ; RV32-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
176 ; RV32-NEXT: andi a0, a0, 1
179 ; RV32-NEXT: beqz a0, .LBB4_2
180 ; RV32-NEXT: # %bb.1: # %truebb
181 ; RV32-NEXT: fld fa0, 16(sp) # 8-byte Folded Reload
182 ; RV32-NEXT: j .LBB4_3
183 ; RV32-NEXT: .LBB4_2: # %falsebb
184 ; RV32-NEXT: fcvt.d.w fa0, zero
185 ; RV32-NEXT: .LBB4_3: # %falsebb
186 ; RV32-NEXT: csrr a0, vlenb
187 ; RV32-NEXT: add sp, sp, a0
188 ; RV32-NEXT: .cfi_def_cfa sp, 16
189 ; RV32-NEXT: addi sp, sp, 16
190 ; RV32-NEXT: .cfi_def_cfa_offset 0
195 ; RV64-NEXT: addi sp, sp, -16
196 ; RV64-NEXT: .cfi_def_cfa_offset 16
197 ; RV64-NEXT: csrr a1, vlenb
198 ; RV64-NEXT: sub sp, sp, a1
199 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
200 ; RV64-NEXT: addi a1, sp, 16
201 ; RV64-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
202 ; RV64-NEXT: andi a0, a0, 1
205 ; RV64-NEXT: beqz a0, .LBB4_2
206 ; RV64-NEXT: # %bb.1: # %truebb
207 ; RV64-NEXT: fld fa0, 16(sp) # 8-byte Folded Reload
208 ; RV64-NEXT: j .LBB4_3
209 ; RV64-NEXT: .LBB4_2: # %falsebb
210 ; RV64-NEXT: fmv.d.x fa0, zero
211 ; RV64-NEXT: .LBB4_3: # %falsebb
212 ; RV64-NEXT: csrr a0, vlenb
213 ; RV64-NEXT: add sp, sp, a0
214 ; RV64-NEXT: .cfi_def_cfa sp, 16
215 ; RV64-NEXT: addi sp, sp, 16
216 ; RV64-NEXT: .cfi_def_cfa_offset 0
218 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
219 br i1 %c, label %truebb, label %falsebb
221 %x = extractelement <vscale x 1 x double> %v, i32 0
227 define float @f32(<vscale x 2 x float> %v, i1 %c) {
; Extract element 0 of an f32 vector spilled around the all-vreg-clobbering
; inline asm: the reload is folded into a scalar flw from the spill slot;
; the false path materializes +0.0 with fmv.w.x from zero.
; NOTE(review): this copy of the test appears garbled -- stray leading
; numbers are fused into every line and several lines are elided (the #APP
; marker, the truebb/falsebb IR labels, the ret instructions, the closing
; brace).  Regenerate with utils/update_llc_test_checks.py.
230 ; CHECK-NEXT: addi sp, sp, -16
231 ; CHECK-NEXT: .cfi_def_cfa_offset 16
232 ; CHECK-NEXT: csrr a1, vlenb
233 ; CHECK-NEXT: sub sp, sp, a1
234 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
235 ; CHECK-NEXT: addi a1, sp, 16
236 ; CHECK-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
237 ; CHECK-NEXT: andi a0, a0, 1
239 ; CHECK-NEXT: #NO_APP
240 ; CHECK-NEXT: beqz a0, .LBB5_2
241 ; CHECK-NEXT: # %bb.1: # %truebb
242 ; CHECK-NEXT: flw fa0, 16(sp) # 8-byte Folded Reload
243 ; CHECK-NEXT: j .LBB5_3
244 ; CHECK-NEXT: .LBB5_2: # %falsebb
245 ; CHECK-NEXT: fmv.w.x fa0, zero
246 ; CHECK-NEXT: .LBB5_3: # %falsebb
247 ; CHECK-NEXT: csrr a0, vlenb
248 ; CHECK-NEXT: add sp, sp, a0
249 ; CHECK-NEXT: .cfi_def_cfa sp, 16
250 ; CHECK-NEXT: addi sp, sp, 16
251 ; CHECK-NEXT: .cfi_def_cfa_offset 0
253 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
254 br i1 %c, label %truebb, label %falsebb
256 %x = extractelement <vscale x 2 x float> %v, i32 0
262 define half @f16(<vscale x 1 x half> %v, i1 %c) {
; Extract element 0 of an f16 vector spilled around the all-vreg-clobbering
; inline asm.  With zfbfmin/zvfh (the ZFMIN prefix) the reload is folded
; into a scalar flh and zero comes from fmv.h.x.  Without scalar half
; support (the NOZFMIN prefix) the half is loaded with lh and NaN-boxed
; into an f32 register by OR-ing 0xffff0000 (lui 1048560) before fmv.w.x.
; NOTE(review): this copy of the test appears garbled -- stray leading
; numbers are fused into every line and several lines are elided (the #APP
; marker, the truebb/falsebb IR labels, the ret instructions, the closing
; brace).  Regenerate with utils/update_llc_test_checks.py.
265 ; ZFMIN-NEXT: addi sp, sp, -16
266 ; ZFMIN-NEXT: .cfi_def_cfa_offset 16
267 ; ZFMIN-NEXT: csrr a1, vlenb
268 ; ZFMIN-NEXT: sub sp, sp, a1
269 ; ZFMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
270 ; ZFMIN-NEXT: addi a1, sp, 16
271 ; ZFMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
272 ; ZFMIN-NEXT: andi a0, a0, 1
274 ; ZFMIN-NEXT: #NO_APP
275 ; ZFMIN-NEXT: beqz a0, .LBB6_2
276 ; ZFMIN-NEXT: # %bb.1: # %truebb
277 ; ZFMIN-NEXT: flh fa0, 16(sp) # 8-byte Folded Reload
278 ; ZFMIN-NEXT: j .LBB6_3
279 ; ZFMIN-NEXT: .LBB6_2: # %falsebb
280 ; ZFMIN-NEXT: fmv.h.x fa0, zero
281 ; ZFMIN-NEXT: .LBB6_3: # %falsebb
282 ; ZFMIN-NEXT: csrr a0, vlenb
283 ; ZFMIN-NEXT: add sp, sp, a0
284 ; ZFMIN-NEXT: .cfi_def_cfa sp, 16
285 ; ZFMIN-NEXT: addi sp, sp, 16
286 ; ZFMIN-NEXT: .cfi_def_cfa_offset 0
289 ; NOZFMIN-LABEL: f16:
291 ; NOZFMIN-NEXT: addi sp, sp, -16
292 ; NOZFMIN-NEXT: .cfi_def_cfa_offset 16
293 ; NOZFMIN-NEXT: csrr a1, vlenb
294 ; NOZFMIN-NEXT: sub sp, sp, a1
295 ; NOZFMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
296 ; NOZFMIN-NEXT: addi a1, sp, 16
297 ; NOZFMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
298 ; NOZFMIN-NEXT: andi a0, a0, 1
300 ; NOZFMIN-NEXT: #NO_APP
301 ; NOZFMIN-NEXT: beqz a0, .LBB6_2
302 ; NOZFMIN-NEXT: # %bb.1: # %truebb
303 ; NOZFMIN-NEXT: lh a0, 16(sp) # 8-byte Folded Reload
304 ; NOZFMIN-NEXT: lui a1, 1048560
305 ; NOZFMIN-NEXT: or a0, a0, a1
306 ; NOZFMIN-NEXT: j .LBB6_3
307 ; NOZFMIN-NEXT: .LBB6_2: # %falsebb
308 ; NOZFMIN-NEXT: lui a0, 1048560
309 ; NOZFMIN-NEXT: .LBB6_3: # %falsebb
310 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
311 ; NOZFMIN-NEXT: csrr a0, vlenb
312 ; NOZFMIN-NEXT: add sp, sp, a0
313 ; NOZFMIN-NEXT: .cfi_def_cfa sp, 16
314 ; NOZFMIN-NEXT: addi sp, sp, 16
315 ; NOZFMIN-NEXT: .cfi_def_cfa_offset 0
317 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
318 br i1 %c, label %truebb, label %falsebb
320 %x = extractelement <vscale x 1 x half> %v, i32 0
326 define bfloat @bf16(<vscale x 2 x bfloat> %v, i1 %c) {
329 ; ZFMIN-NEXT: addi sp, sp, -16
330 ; ZFMIN-NEXT: .cfi_def_cfa_offset 16
331 ; ZFMIN-NEXT: csrr a1, vlenb
332 ; ZFMIN-NEXT: sub sp, sp, a1
333 ; ZFMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
334 ; ZFMIN-NEXT: addi a1, sp, 16
335 ; ZFMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
336 ; ZFMIN-NEXT: andi a0, a0, 1
338 ; ZFMIN-NEXT: #NO_APP
339 ; ZFMIN-NEXT: beqz a0, .LBB7_2
340 ; ZFMIN-NEXT: # %bb.1: # %truebb
341 ; ZFMIN-NEXT: lh a0, 16(sp) # 8-byte Folded Reload
342 ; ZFMIN-NEXT: fmv.h.x fa0, a0
343 ; ZFMIN-NEXT: j .LBB7_3
344 ; ZFMIN-NEXT: .LBB7_2: # %falsebb
345 ; ZFMIN-NEXT: fmv.h.x fa0, zero
346 ; ZFMIN-NEXT: .LBB7_3: # %falsebb
347 ; ZFMIN-NEXT: csrr a0, vlenb
348 ; ZFMIN-NEXT: add sp, sp, a0
349 ; ZFMIN-NEXT: .cfi_def_cfa sp, 16
350 ; ZFMIN-NEXT: addi sp, sp, 16
351 ; ZFMIN-NEXT: .cfi_def_cfa_offset 0
354 ; NOZFMIN-LABEL: bf16:
356 ; NOZFMIN-NEXT: addi sp, sp, -16
357 ; NOZFMIN-NEXT: .cfi_def_cfa_offset 16
358 ; NOZFMIN-NEXT: csrr a1, vlenb
359 ; NOZFMIN-NEXT: sub sp, sp, a1
360 ; NOZFMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 1 * vlenb
361 ; NOZFMIN-NEXT: addi a1, sp, 16
362 ; NOZFMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
363 ; NOZFMIN-NEXT: andi a0, a0, 1
365 ; NOZFMIN-NEXT: #NO_APP
366 ; NOZFMIN-NEXT: beqz a0, .LBB7_2
367 ; NOZFMIN-NEXT: # %bb.1: # %truebb
368 ; NOZFMIN-NEXT: lh a0, 16(sp) # 8-byte Folded Reload
369 ; NOZFMIN-NEXT: lui a1, 1048560
370 ; NOZFMIN-NEXT: or a0, a0, a1
371 ; NOZFMIN-NEXT: j .LBB7_3
372 ; NOZFMIN-NEXT: .LBB7_2: # %falsebb
373 ; NOZFMIN-NEXT: lui a0, 1048560
374 ; NOZFMIN-NEXT: .LBB7_3: # %falsebb
375 ; NOZFMIN-NEXT: fmv.w.x fa0, a0
376 ; NOZFMIN-NEXT: csrr a0, vlenb
377 ; NOZFMIN-NEXT: add sp, sp, a0
378 ; NOZFMIN-NEXT: .cfi_def_cfa sp, 16
379 ; NOZFMIN-NEXT: addi sp, sp, 16
380 ; NOZFMIN-NEXT: .cfi_def_cfa_offset 0
382 tail call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
383 br i1 %c, label %truebb, label %falsebb
385 %x = extractelement <vscale x 2 x bfloat> %v, i32 0