; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -verify-machineinstrs < %s | FileCheck %s
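
; The masked VP funnel shifts have no single RVV instruction, so they are
; expected to lower to shift/and/or sequences. For element width BW, roughly:
;   fshr: (a << 1 << (~c & (BW-1))) | (b >> (c & (BW-1)))
;   fshl: (a << (c & (BW-1))) | (b >> 1 >> (~c & (BW-1)))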

declare <vscale x 1 x i8> @llvm.vp.fshr.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @fshr_v1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i8> @llvm.vp.fshr.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %res
}

declare <vscale x 1 x i8> @llvm.vp.fshl.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @fshl_v1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i8> @llvm.vp.fshl.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %res
}

declare <vscale x 2 x i8> @llvm.vp.fshr.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i8> @fshr_v2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i8> @llvm.vp.fshr.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %res
}

declare <vscale x 2 x i8> @llvm.vp.fshl.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i8> @fshl_v2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i8> @llvm.vp.fshl.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, <vscale x 2 x i8> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %res
}

declare <vscale x 4 x i8> @llvm.vp.fshr.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i8> @fshr_v4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i8> @llvm.vp.fshr.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %res
}

declare <vscale x 4 x i8> @llvm.vp.fshl.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i8> @fshl_v4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i8> @llvm.vp.fshl.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, <vscale x 4 x i8> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %res
}

declare <vscale x 8 x i8> @llvm.vp.fshr.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i8> @fshr_v8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i8> @llvm.vp.fshr.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %res
}

declare <vscale x 8 x i8> @llvm.vp.fshl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i8> @fshl_v8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 7, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i8> @llvm.vp.fshl.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %res
}

declare <vscale x 16 x i8> @llvm.vp.fshr.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i8> @fshr_v16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v14, v12, v0.t
; CHECK-NEXT:    vand.vi v14, v14, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v14, v0.t
; CHECK-NEXT:    vand.vi v12, v12, 7, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.vp.fshr.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %res
}

declare <vscale x 16 x i8> @llvm.vp.fshl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i8> @fshl_v16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v10, 1, v0.t
; CHECK-NEXT:    vnot.v v14, v12, v0.t
; CHECK-NEXT:    vand.vi v14, v14, 7, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v14, v0.t
; CHECK-NEXT:    vand.vi v12, v12, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.vp.fshl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %res
}

declare <vscale x 32 x i8> @llvm.vp.fshr.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i8> @fshr_v32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v20, v16, v0.t
; CHECK-NEXT:    vand.vi v20, v20, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v20, v0.t
; CHECK-NEXT:    vand.vi v16, v16, 7, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.vp.fshr.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %res
}

declare <vscale x 32 x i8> @llvm.vp.fshl.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i8> @fshl_v32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v12, 1, v0.t
; CHECK-NEXT:    vnot.v v20, v16, v0.t
; CHECK-NEXT:    vand.vi v20, v20, 7, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v20, v0.t
; CHECK-NEXT:    vand.vi v16, v16, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.vp.fshl.nxv32i8(<vscale x 32 x i8> %a, <vscale x 32 x i8> %b, <vscale x 32 x i8> %c, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %res
}
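
; From LMUL=8 (nxv64i8) upward the third operand arrives on the stack and the
; three operands plus a temporary no longer fit in the vector register file,
; so the lowering is expected to spill one operand to a vlenb-sized stack slot
; and reload it, as checked below.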

declare <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
define <vscale x 64 x i8> @fshr_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsll.vi v16, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 7, v0.t
; CHECK-NEXT:    vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vand.vi v16, v24, 7, v0.t
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 64 x i8> @llvm.vp.fshr.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %res
}

declare <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i1>, i32)
define <vscale x 64 x i8> @fshl_v64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v16, 1, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 7, v0.t
; CHECK-NEXT:    vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vand.vi v16, v24, 7, v0.t
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 64 x i8> @llvm.vp.fshl.nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i8> %c, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %res
}

declare <vscale x 1 x i16> @llvm.vp.fshr.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i16> @fshr_v1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 15, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i16> @llvm.vp.fshr.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %res
}

declare <vscale x 1 x i16> @llvm.vp.fshl.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i16> @fshl_v1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 15, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i16> @llvm.vp.fshl.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, <vscale x 1 x i16> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %res
}

declare <vscale x 2 x i16> @llvm.vp.fshr.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i16> @fshr_v2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 15, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i16> @llvm.vp.fshr.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %res
}

declare <vscale x 2 x i16> @llvm.vp.fshl.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i16> @fshl_v2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 15, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i16> @llvm.vp.fshl.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, <vscale x 2 x i16> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %res
}

declare <vscale x 4 x i16> @llvm.vp.fshr.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i16> @fshr_v4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 15, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i16> @llvm.vp.fshr.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %res
}

declare <vscale x 4 x i16> @llvm.vp.fshl.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i16> @fshl_v4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vnot.v v11, v10, v0.t
; CHECK-NEXT:    vand.vi v11, v11, 15, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vand.vi v10, v10, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i16> @llvm.vp.fshl.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %res
}

declare <vscale x 8 x i16> @llvm.vp.fshr.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i16> @fshr_v8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v14, v12, v0.t
; CHECK-NEXT:    vand.vi v14, v14, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v14, v0.t
; CHECK-NEXT:    vand.vi v12, v12, 15, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.vp.fshr.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %res
}

declare <vscale x 8 x i16> @llvm.vp.fshl.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i16> @fshl_v8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v10, 1, v0.t
; CHECK-NEXT:    vnot.v v14, v12, v0.t
; CHECK-NEXT:    vand.vi v14, v14, 15, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v14, v0.t
; CHECK-NEXT:    vand.vi v12, v12, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.vp.fshl.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %res
}

declare <vscale x 16 x i16> @llvm.vp.fshr.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i16> @fshr_v16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v20, v16, v0.t
; CHECK-NEXT:    vand.vi v20, v20, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v20, v0.t
; CHECK-NEXT:    vand.vi v16, v16, 15, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.vp.fshr.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %res
}

declare <vscale x 16 x i16> @llvm.vp.fshl.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i16> @fshl_v16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v12, 1, v0.t
; CHECK-NEXT:    vnot.v v20, v16, v0.t
; CHECK-NEXT:    vand.vi v20, v20, 15, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v20, v0.t
; CHECK-NEXT:    vand.vi v16, v16, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.vp.fshl.nxv16i16(<vscale x 16 x i16> %a, <vscale x 16 x i16> %b, <vscale x 16 x i16> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %res
}

declare <vscale x 32 x i16> @llvm.vp.fshr.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i16> @fshr_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsll.vi v16, v8, 1, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vand.vi v16, v24, 15, v0.t
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x i16> @llvm.vp.fshr.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %res
}

declare <vscale x 32 x i16> @llvm.vp.fshl.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i16> @fshl_v32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v16, 1, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vand.vi v16, v24, 15, v0.t
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 32 x i16> @llvm.vp.fshl.nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i16> %c, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %res
}
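
; For e32 and e64 the shift-amount mask (31 or 63) no longer fits vand.vi's
; 5-bit signed immediate, so it is expected to be materialized with li and
; applied via vand.vx.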

declare <vscale x 1 x i32> @llvm.vp.fshr.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i32> @fshr_v1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v11, v10, a1, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vnot.v v10, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i32> @llvm.vp.fshr.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %res
}

declare <vscale x 1 x i32> @llvm.vp.fshl.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i32> @fshl_v1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vand.vx v11, v10, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vnot.v v10, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a1, v0.t
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i32> @llvm.vp.fshl.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, <vscale x 1 x i32> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %res
}

declare <vscale x 2 x i32> @llvm.vp.fshr.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @fshr_v2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v11, v10, a1, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vnot.v v10, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i32> @llvm.vp.fshr.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %res
}

declare <vscale x 2 x i32> @llvm.vp.fshl.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @fshl_v2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vand.vx v11, v10, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vnot.v v10, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a1, v0.t
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i32> @llvm.vp.fshl.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %res
}

declare <vscale x 4 x i32> @llvm.vp.fshr.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i32> @fshr_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v14, v12, a1, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v14, v0.t
; CHECK-NEXT:    vnot.v v12, v12, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.vp.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %res
}

declare <vscale x 4 x i32> @llvm.vp.fshl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i32> @fshl_v4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vand.vx v14, v12, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v14, v0.t
; CHECK-NEXT:    vnot.v v12, v12, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a1, v0.t
; CHECK-NEXT:    vsrl.vi v10, v10, 1, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.vp.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %res
}

declare <vscale x 8 x i32> @llvm.vp.fshr.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i32> @fshr_v8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v20, v16, a1, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v20, v0.t
; CHECK-NEXT:    vnot.v v16, v16, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vp.fshr.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %res
}

declare <vscale x 8 x i32> @llvm.vp.fshl.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i32> @fshl_v8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 31
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vand.vx v20, v16, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v20, v0.t
; CHECK-NEXT:    vnot.v v16, v16, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a1, v0.t
; CHECK-NEXT:    vsrl.vi v12, v12, 1, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.vp.fshl.nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i32> %b, <vscale x 8 x i32> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %res
}

declare <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i32> @fshr_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    li a0, 31
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v24, a0, v0.t
; CHECK-NEXT:    vsrl.vv v16, v16, v8, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vi v24, v24, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v24, v8, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i32> @llvm.vp.fshr.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %res
}

declare <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i32> @fshl_v16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    li a0, 31
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v24, a0, v0.t
; CHECK-NEXT:    vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vnot.v v16, v24, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vi v24, v24, 1, v0.t
; CHECK-NEXT:    vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i32> @llvm.vp.fshl.nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b, <vscale x 16 x i32> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %res
}

declare <vscale x 1 x i64> @llvm.vp.fshr.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i64> @fshr_v1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 63
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vx v11, v10, a1, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT:    vnot.v v10, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i64> @llvm.vp.fshr.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %res
}

declare <vscale x 1 x i64> @llvm.vp.fshl.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i64> @fshl_v1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 63
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vand.vx v11, v10, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT:    vnot.v v10, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a1, v0.t
; CHECK-NEXT:    vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT:    vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 1 x i64> @llvm.vp.fshl.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, <vscale x 1 x i64> %c, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %res
}

declare <vscale x 2 x i64> @llvm.vp.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i64> @fshr_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 63
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vx v14, v12, a1, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v14, v0.t
; CHECK-NEXT:    vnot.v v12, v12, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.vp.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %res
}

declare <vscale x 2 x i64> @llvm.vp.fshl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i64> @fshl_v2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 63
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vand.vx v14, v12, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v14, v0.t
; CHECK-NEXT:    vnot.v v12, v12, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a1, v0.t
; CHECK-NEXT:    vsrl.vi v10, v10, 1, v0.t
; CHECK-NEXT:    vsrl.vv v10, v10, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.vp.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %res
}

declare <vscale x 4 x i64> @llvm.vp.fshr.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i64> @fshr_v4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 63
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vx v20, v16, a1, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v20, v0.t
; CHECK-NEXT:    vnot.v v16, v16, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a1, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.vp.fshr.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %res
}

declare <vscale x 4 x i64> @llvm.vp.fshl.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i64> @fshl_v4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 63
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vand.vx v20, v16, a1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v20, v0.t
; CHECK-NEXT:    vnot.v v16, v16, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a1, v0.t
; CHECK-NEXT:    vsrl.vi v12, v12, 1, v0.t
; CHECK-NEXT:    vsrl.vv v12, v12, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.vp.fshl.nxv4i64(<vscale x 4 x i64> %a, <vscale x 4 x i64> %b, <vscale x 4 x i64> %c, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %res
}
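
; nxv7i64 is not a power-of-two element count; it is expected to be widened
; and lowered the same way as the nxv8i64 case that follows.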

declare <vscale x 7 x i64> @llvm.vp.fshr.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i1>, i32)
define <vscale x 7 x i64> @fshr_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v7i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    li a0, 63
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v24, a0, v0.t
; CHECK-NEXT:    vsrl.vv v16, v16, v8, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vi v24, v24, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v24, v8, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 7 x i64> @llvm.vp.fshr.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x i64> %res
}

declare <vscale x 7 x i64> @llvm.vp.fshl.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i64>, <vscale x 7 x i1>, i32)
define <vscale x 7 x i64> @fshl_v7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v7i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    li a0, 63
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v24, a0, v0.t
; CHECK-NEXT:    vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vnot.v v16, v24, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vi v24, v24, 1, v0.t
; CHECK-NEXT:    vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 7 x i64> @llvm.vp.fshl.nxv7i64(<vscale x 7 x i64> %a, <vscale x 7 x i64> %b, <vscale x 7 x i64> %c, <vscale x 7 x i1> %m, i32 %evl)
  ret <vscale x 7 x i64> %res
}

declare <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i64> @fshr_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    li a0, 63
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v24, a0, v0.t
; CHECK-NEXT:    vsrl.vv v16, v16, v8, v0.t
; CHECK-NEXT:    vnot.v v8, v24, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vi v24, v24, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v24, v8, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i64> @llvm.vp.fshr.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %res
}

declare <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i64> @fshl_v8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv8r.v v16, v8
; CHECK-NEXT:    li a0, 63
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v24, a0, v0.t
; CHECK-NEXT:    vsll.vv v8, v16, v8, v0.t
; CHECK-NEXT:    vnot.v v16, v24, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vi v24, v24, 1, v0.t
; CHECK-NEXT:    vsrl.vv v16, v24, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i64> @llvm.vp.fshl.nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b, <vscale x 8 x i64> %c, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %res
}
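
; nxv16i64 occupies two LMUL=8 register groups per operand, so the lowering
; splits the operation in two: the high half runs under the EVL remainder
; (computed with the sub/sltu sequence) and a mask slid down by vlenb/8
; elements, then the low half runs under the clamped EVL with the original
; mask, with heavy stack spilling throughout.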

declare <vscale x 16 x i64> @llvm.vp.fshr.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a3, 48
; CHECK-NEXT:    mul a1, a1, a3
; CHECK-NEXT:    sub sp, sp, a1
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
; CHECK-NEXT:    vmv1r.v v24, v0
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    li a3, 24
; CHECK-NEXT:    mul a1, a1, a3
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 5
; CHECK-NEXT:    add a1, sp, a1
; CHECK-NEXT:    addi a1, a1, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a3, a1, 3
; CHECK-NEXT:    add a5, a0, a3
; CHECK-NEXT:    add a3, a2, a3
; CHECK-NEXT:    vl8re64.v v16, (a3)
; CHECK-NEXT:    csrr a3, vlenb
; CHECK-NEXT:    slli a3, a3, 4
; CHECK-NEXT:    add a3, sp, a3
; CHECK-NEXT:    addi a3, a3, 16
; CHECK-NEXT:    vs8r.v v16, (a3) # Unknown-size Folded Spill
; CHECK-NEXT:    sub a3, a4, a1
; CHECK-NEXT:    sltu a6, a4, a3
; CHECK-NEXT:    addi a6, a6, -1
; CHECK-NEXT:    and a6, a6, a3
; CHECK-NEXT:    srli a3, a1, 3
; CHECK-NEXT:    vl8re64.v v8, (a5)
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    li a7, 40
; CHECK-NEXT:    mul a5, a5, a7
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    vsetvli a5, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a3
; CHECK-NEXT:    li a3, 63
; CHECK-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v16, a3, v0.t
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    li a6, 40
; CHECK-NEXT:    mul a5, a5, a6
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vl8r.v v16, (a5) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    slli a5, a5, 3
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a5, vlenb
; CHECK-NEXT:    slli a5, a5, 4
; CHECK-NEXT:    add a5, sp, a5
; CHECK-NEXT:    addi a5, a5, 16
; CHECK-NEXT:    vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT:    vnot.v v8, v8, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a3, v0.t
; CHECK-NEXT:    addi a5, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT:    vl8re64.v v8, (a0)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vl8re64.v v8, (a2)
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a2, 40
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a2, 24
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vi v16, v8, 1, v0.t
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vv v16, v16, v8, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a2, 24
; CHECK-NEXT:    mul a0, a0, a2
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    bltu a4, a1, .LBB46_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    mv a4, a1
; CHECK-NEXT:  .LBB46_2:
; CHECK-NEXT:    vmv1r.v v0, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 40
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT:    vand.vx v8, v16, a3, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 40
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vnot.v v16, v8, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a3, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 5
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 4
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 24
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add a0, sp, a0
; CHECK-NEXT:    addi a0, a0, 16
; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    li a1, 48
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i64> @llvm.vp.fshr.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i64> %res
}
1123 declare <vscale x 16 x i64> @llvm.vp.fshl.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
1124 define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
1125 ; CHECK-LABEL: fshl_v16i64:
1127 ; CHECK-NEXT: addi sp, sp, -16
1128 ; CHECK-NEXT: .cfi_def_cfa_offset 16
1129 ; CHECK-NEXT: csrr a1, vlenb
1130 ; CHECK-NEXT: li a3, 40
1131 ; CHECK-NEXT: mul a1, a1, a3
1132 ; CHECK-NEXT: sub sp, sp, a1
1133 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
1134 ; CHECK-NEXT: vmv1r.v v24, v0
1135 ; CHECK-NEXT: csrr a1, vlenb
1136 ; CHECK-NEXT: slli a1, a1, 5
1137 ; CHECK-NEXT: add a1, sp, a1
1138 ; CHECK-NEXT: addi a1, a1, 16
1139 ; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
1140 ; CHECK-NEXT: csrr a1, vlenb
1141 ; CHECK-NEXT: li a3, 24
1142 ; CHECK-NEXT: mul a1, a1, a3
1143 ; CHECK-NEXT: add a1, sp, a1
1144 ; CHECK-NEXT: addi a1, a1, 16
1145 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
1146 ; CHECK-NEXT: csrr a3, vlenb
1147 ; CHECK-NEXT: slli a5, a3, 3
1148 ; CHECK-NEXT: srli a1, a3, 3
1149 ; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
1150 ; CHECK-NEXT: vslidedown.vx v0, v0, a1
1151 ; CHECK-NEXT: add a1, a2, a5
1152 ; CHECK-NEXT: vl8re64.v v8, (a1)
1153 ; CHECK-NEXT: csrr a1, vlenb
1154 ; CHECK-NEXT: slli a1, a1, 4
1155 ; CHECK-NEXT: add a1, sp, a1
1156 ; CHECK-NEXT: addi a1, a1, 16
1157 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
1158 ; CHECK-NEXT: sub a1, a4, a3
1159 ; CHECK-NEXT: sltu a6, a4, a1
1160 ; CHECK-NEXT: addi a6, a6, -1
1161 ; CHECK-NEXT: and a6, a6, a1
1162 ; CHECK-NEXT: li a1, 63
1163 ; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
1164 ; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
1165 ; CHECK-NEXT: csrr a6, vlenb
1166 ; CHECK-NEXT: slli a6, a6, 5
1167 ; CHECK-NEXT: add a6, sp, a6
1168 ; CHECK-NEXT: addi a6, a6, 16
1169 ; CHECK-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
1170 ; CHECK-NEXT: vsll.vv v16, v16, v8, v0.t
1171 ; CHECK-NEXT: csrr a6, vlenb
1172 ; CHECK-NEXT: slli a6, a6, 3
1173 ; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v16, (a6) # Unknown-size Folded Spill
; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: csrr a6, vlenb
; CHECK-NEXT: slli a6, a6, 4
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vl8r.v v8, (a6) # Unknown-size Folded Reload
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vl8re64.v v16, (a5)
; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
; CHECK-NEXT: addi a5, sp, 16
; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsrl.vi v16, v16, 1, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a4, a3, .LBB47_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB47_2:
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: vand.vx v16, v8, a1, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 40
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%res = call <vscale x 16 x i64> @llvm.vp.fshl.nxv16i64(<vscale x 16 x i64> %a, <vscale x 16 x i64> %b, <vscale x 16 x i64> %c, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x i64> %res
}

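; fshr/fshl on i9, a non-power-of-two type, are legalized by promoting to
; i16: the shift amount is masked to 9 bits and reduced modulo 9 with
; vremu, and the data operands are realigned with extra constant shifts so
; the 16-bit shifts reproduce the 9-bit funnel-shift semantics.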
declare <vscale x 1 x i9> @llvm.vp.fshr.nxv1i9(<vscale x 1 x i9>, <vscale x 1 x i9>, <vscale x 1 x i9>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i9> @fshr_v1i9(<vscale x 1 x i9> %a, <vscale x 1 x i9> %b, <vscale x 1 x i9> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v1i9:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 511
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v10, v10, a1, v0.t
; CHECK-NEXT: li a0, 9
; CHECK-NEXT: vremu.vx v10, v10, a0, v0.t
; CHECK-NEXT: vadd.vi v10, v10, 7, v0.t
; CHECK-NEXT: vand.vi v11, v10, 15, v0.t
; CHECK-NEXT: vsll.vi v9, v9, 7, v0.t
; CHECK-NEXT: vsrl.vv v9, v9, v11, v0.t
; CHECK-NEXT: vnot.v v10, v10, v0.t
; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%res = call <vscale x 1 x i9> @llvm.vp.fshr.nxv1i9(<vscale x 1 x i9> %a, <vscale x 1 x i9> %b, <vscale x 1 x i9> %c, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i9> %res
}

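; For fshl the promoted b is first placed at bits [15:7] (vsll.vi 7); the
; later vsrl.vi 1 plus vsrl.vv by (~c & 15) shift it right by a total of
; 16 - (c % 9), which matches b >> (9 - c % 9) on the original 9-bit value.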
declare <vscale x 1 x i9> @llvm.vp.fshl.nxv1i9(<vscale x 1 x i9>, <vscale x 1 x i9>, <vscale x 1 x i9>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i9> @fshl_v1i9(<vscale x 1 x i9> %a, <vscale x 1 x i9> %b, <vscale x 1 x i9> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v1i9:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 511
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v10, v10, a1, v0.t
; CHECK-NEXT: li a0, 9
; CHECK-NEXT: vremu.vx v10, v10, a0, v0.t
; CHECK-NEXT: vand.vi v11, v10, 15, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v11, v0.t
; CHECK-NEXT: vnot.v v10, v10, v0.t
; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
; CHECK-NEXT: vsll.vi v9, v9, 7, v0.t
; CHECK-NEXT: vsrl.vi v9, v9, 1, v0.t
; CHECK-NEXT: vsrl.vv v9, v9, v10, v0.t
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%res = call <vscale x 1 x i9> @llvm.vp.fshl.nxv1i9(<vscale x 1 x i9> %a, <vscale x 1 x i9> %b, <vscale x 1 x i9> %c, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i9> %res
}

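; i4 is promoted to i8. Since two 4-bit halves fit in one byte, the
; expansion packs them as (a << 4) | b; fshr then becomes a single right
; shift by c % 4 followed by masking the low nibble.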
declare <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i8> @llvm.vp.zext.nxv1i8.nxv1i4(<vscale x 1 x i4>, <vscale x 1 x i1>, i32)
declare <vscale x 1 x i4> @llvm.vp.fshr.nxv1i4(<vscale x 1 x i4>, <vscale x 1 x i4>, <vscale x 1 x i4>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @fshr_v1i4(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshr_v1i4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
; CHECK-NEXT: vand.vi v9, v9, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: vremu.vx v9, v10, a0, v0.t
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
; CHECK-NEXT: ret
%trunca = call <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i1> %m, i32 zeroext %evl)
%truncb = call <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl)
%truncc = call <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl)
%fshr = call <vscale x 1 x i4> @llvm.vp.fshr.nxv1i4(<vscale x 1 x i4> %trunca, <vscale x 1 x i4> %truncb, <vscale x 1 x i4> %truncc, <vscale x 1 x i1> %m, i32 %evl)
%res = call <vscale x 1 x i8> @llvm.vp.zext.nxv1i8.nxv1i4(<vscale x 1 x i4> %fshr, <vscale x 1 x i1> %m, i32 zeroext %evl)
ret <vscale x 1 x i8> %res
}

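; fshl uses the same (a << 4) | b packing, shifts it left by c % 4, and
; extracts the high nibble with vsrl.vi 4 plus a mask.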
declare <vscale x 1 x i4> @llvm.vp.fshl.nxv1i4(<vscale x 1 x i4>, <vscale x 1 x i4>, <vscale x 1 x i4>, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @fshl_v1i4(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: fshl_v1i4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vi v10, v10, 15, v0.t
; CHECK-NEXT: vand.vi v9, v9, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: vremu.vx v9, v10, a0, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
; CHECK-NEXT: ret
%trunca = call <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i1> %m, i32 zeroext %evl)
%truncb = call <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl)
%truncc = call <vscale x 1 x i4> @llvm.vp.trunc.nxv1i4.nxv1i8(<vscale x 1 x i8> %c, <vscale x 1 x i1> %m, i32 zeroext %evl)
%fshl = call <vscale x 1 x i4> @llvm.vp.fshl.nxv1i4(<vscale x 1 x i4> %trunca, <vscale x 1 x i4> %truncb, <vscale x 1 x i4> %truncc, <vscale x 1 x i1> %m, i32 %evl)
%res = call <vscale x 1 x i8> @llvm.vp.zext.nxv1i8.nxv1i4(<vscale x 1 x i4> %fshl, <vscale x 1 x i1> %m, i32 zeroext %evl)
ret <vscale x 1 x i8> %res
}