; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV32 %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -global-isel -stop-after=irtranslator \
; RUN:   -verify-machineinstrs < %s | FileCheck -check-prefixes=RV64 %s
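
; zeroinitializer of a scalable vector is translated by the IRTranslator into
; a G_SPLAT_VECTOR of a scalar zero constant. Mask (i1) splats are returned in
; the mask register $v0; every other element type is returned in $v8 with a
; register class sized to the type's LMUL ($v8, $v8m2, $v8m4, $v8m8).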
define <vscale x 1 x i1> @splat_zero_nxv1i1() {
  ; RV32-LABEL: name: splat_zero_nxv1i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv1i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 1 x i1> zeroinitializer
}

define <vscale x 2 x i1> @splat_zero_nxv2i1() {
  ; RV32-LABEL: name: splat_zero_nxv2i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv2i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 2 x i1> zeroinitializer
}

define <vscale x 4 x i1> @splat_zero_nxv4i1() {
  ; RV32-LABEL: name: splat_zero_nxv4i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv4i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 4 x i1> zeroinitializer
}

define <vscale x 8 x i1> @splat_zero_nxv8i1() {
  ; RV32-LABEL: name: splat_zero_nxv8i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv8i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 8 x i1> zeroinitializer
}

define <vscale x 16 x i1> @splat_zero_nxv16i1() {
  ; RV32-LABEL: name: splat_zero_nxv16i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv16i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 16 x i1> zeroinitializer
}

define <vscale x 32 x i1> @splat_zero_nxv32i1() {
  ; RV32-LABEL: name: splat_zero_nxv32i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv32i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 32 x i1> zeroinitializer
}

define <vscale x 64 x i1> @splat_zero_nxv64i1() {
  ; RV32-LABEL: name: splat_zero_nxv64i1
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV32-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>)
  ; RV32-NEXT:   PseudoRET implicit $v0
  ;
  ; RV64-LABEL: name: splat_zero_nxv64i1
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s1>) = G_SPLAT_VECTOR [[C]](s1)
  ; RV64-NEXT:   $v0 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s1>)
  ; RV64-NEXT:   PseudoRET implicit $v0
  ret <vscale x 64 x i1> zeroinitializer
}
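
; i8 element splats: G_CONSTANT i8 0 splatted across nxv1i8 through nxv64i8.
; From nxv16i8 on, the result needs the grouped register classes $v8m2,
; $v8m4, and $v8m8.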
define <vscale x 1 x i8> @splat_zero_nxv1i8() {
  ; RV32-LABEL: name: splat_zero_nxv1i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x i8> zeroinitializer
}

define <vscale x 2 x i8> @splat_zero_nxv2i8() {
  ; RV32-LABEL: name: splat_zero_nxv2i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv2i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 2 x i8> zeroinitializer
}

define <vscale x 4 x i8> @splat_zero_nxv4i8() {
  ; RV32-LABEL: name: splat_zero_nxv4i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv4i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 4 x i8> zeroinitializer
}

define <vscale x 8 x i8> @splat_zero_nxv8i8() {
  ; RV32-LABEL: name: splat_zero_nxv8i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv8i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 8 x i8> zeroinitializer
}

define <vscale x 16 x i8> @splat_zero_nxv16i8() {
  ; RV32-LABEL: name: splat_zero_nxv16i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv16i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 16 x i8> zeroinitializer
}

define <vscale x 32 x i8> @splat_zero_nxv32i8() {
  ; RV32-LABEL: name: splat_zero_nxv32i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv32i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 32 x i8> zeroinitializer
}

define <vscale x 64 x i8> @splat_zero_nxv64i8() {
  ; RV32-LABEL: name: splat_zero_nxv64i8
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv64i8
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 64 x s8>) = G_SPLAT_VECTOR [[C]](s8)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 64 x s8>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 64 x i8> zeroinitializer
}
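
; i16 element splats, nxv1i16 through nxv32i16.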
define <vscale x 1 x i16> @splat_zero_nxv1i16() {
  ; RV32-LABEL: name: splat_zero_nxv1i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x i16> zeroinitializer
}

define <vscale x 2 x i16> @splat_zero_nxv2i16() {
  ; RV32-LABEL: name: splat_zero_nxv2i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv2i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 2 x i16> zeroinitializer
}

define <vscale x 4 x i16> @splat_zero_nxv4i16() {
  ; RV32-LABEL: name: splat_zero_nxv4i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv4i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 4 x i16> zeroinitializer
}

define <vscale x 8 x i16> @splat_zero_nxv8i16() {
  ; RV32-LABEL: name: splat_zero_nxv8i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv8i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 8 x i16> zeroinitializer
}

define <vscale x 16 x i16> @splat_zero_nxv16i16() {
  ; RV32-LABEL: name: splat_zero_nxv16i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv16i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 16 x i16> zeroinitializer
}

define <vscale x 32 x i16> @splat_zero_nxv32i16() {
  ; RV32-LABEL: name: splat_zero_nxv32i16
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv32i16
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 32 x i16> zeroinitializer
}
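
; i32 element splats, nxv1i32 through nxv16i32.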
define <vscale x 1 x i32> @splat_zero_nxv1i32() {
  ; RV32-LABEL: name: splat_zero_nxv1i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x i32> zeroinitializer
}

define <vscale x 2 x i32> @splat_zero_nxv2i32() {
  ; RV32-LABEL: name: splat_zero_nxv2i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv2i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 2 x i32> zeroinitializer
}

define <vscale x 4 x i32> @splat_zero_nxv4i32() {
  ; RV32-LABEL: name: splat_zero_nxv4i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv4i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 4 x i32> zeroinitializer
}

define <vscale x 8 x i32> @splat_zero_nxv8i32() {
  ; RV32-LABEL: name: splat_zero_nxv8i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv8i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 8 x i32> zeroinitializer
}

define <vscale x 16 x i32> @splat_zero_nxv16i32() {
  ; RV32-LABEL: name: splat_zero_nxv16i32
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv16i32
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 16 x i32> zeroinitializer
}
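
; i64 element splats, nxv1i64 through nxv8i64. The s64 G_CONSTANT appears
; unsplit even on RV32 because this test stops after the IRTranslator,
; before legalization.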
define <vscale x 1 x i64> @splat_zero_nxv1i64() {
  ; RV32-LABEL: name: splat_zero_nxv1i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x i64> zeroinitializer
}

define <vscale x 2 x i64> @splat_zero_nxv2i64() {
  ; RV32-LABEL: name: splat_zero_nxv2i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv2i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 2 x i64> zeroinitializer
}

define <vscale x 4 x i64> @splat_zero_nxv4i64() {
  ; RV32-LABEL: name: splat_zero_nxv4i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv4i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 4 x i64> zeroinitializer
}

define <vscale x 8 x i64> @splat_zero_nxv8i64() {
  ; RV32-LABEL: name: splat_zero_nxv8i64
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv8i64
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 8 x i64> zeroinitializer
}
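
; Floating-point splats use G_FCONSTANT instead of G_CONSTANT. The half
; tests depend on +zvfh from the RUN lines.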
define <vscale x 1 x half> @splat_zero_nxv1half() {
  ; RV32-LABEL: name: splat_zero_nxv1half
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1half
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x half> zeroinitializer
}

define <vscale x 2 x half> @splat_zero_nxv2half() {
  ; RV32-LABEL: name: splat_zero_nxv2half
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv2half
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 2 x half> zeroinitializer
}

define <vscale x 4 x half> @splat_zero_nxv4half() {
  ; RV32-LABEL: name: splat_zero_nxv4half
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv4half
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 4 x half> zeroinitializer
}

define <vscale x 8 x half> @splat_zero_nxv8half() {
  ; RV32-LABEL: name: splat_zero_nxv8half
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv8half
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 8 x half> zeroinitializer
}

define <vscale x 16 x half> @splat_zero_nxv16half() {
  ; RV32-LABEL: name: splat_zero_nxv16half
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv16half
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 16 x half> zeroinitializer
}

define <vscale x 32 x half> @splat_zero_nxv32half() {
  ; RV32-LABEL: name: splat_zero_nxv32half
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv32half
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s16) = G_FCONSTANT half 0xH0000
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 32 x s16>) = G_SPLAT_VECTOR [[C]](s16)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 32 x s16>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 32 x half> zeroinitializer
}
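
; float element splats, nxv1float through nxv16float.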
define <vscale x 1 x float> @splat_zero_nxv1float() {
  ; RV32-LABEL: name: splat_zero_nxv1float
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1float
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x float> zeroinitializer
}

define <vscale x 2 x float> @splat_zero_nxv2float() {
  ; RV32-LABEL: name: splat_zero_nxv2float
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv2float
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 2 x float> zeroinitializer
}

define <vscale x 4 x float> @splat_zero_nxv4float() {
  ; RV32-LABEL: name: splat_zero_nxv4float
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv4float
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 4 x float> zeroinitializer
}

define <vscale x 8 x float> @splat_zero_nxv8float() {
  ; RV32-LABEL: name: splat_zero_nxv8float
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv8float
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 8 x float> zeroinitializer
}

define <vscale x 16 x float> @splat_zero_nxv16float() {
  ; RV32-LABEL: name: splat_zero_nxv16float
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv16float
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 16 x s32>) = G_SPLAT_VECTOR [[C]](s32)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 16 x s32>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 16 x float> zeroinitializer
}
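
; double element splats, nxv1double through nxv8double.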
define <vscale x 1 x double> @splat_zero_nxv1double() {
  ; RV32-LABEL: name: splat_zero_nxv1double
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1double
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x double> zeroinitializer
}

define <vscale x 2 x double> @splat_zero_nxv2double() {
  ; RV32-LABEL: name: splat_zero_nxv2double
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv2double
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 2 x double> zeroinitializer
}

define <vscale x 4 x double> @splat_zero_nxv4double() {
  ; RV32-LABEL: name: splat_zero_nxv4double
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv4double
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 4 x double> zeroinitializer
}

define <vscale x 8 x double> @splat_zero_nxv8double() {
  ; RV32-LABEL: name: splat_zero_nxv8double
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV32-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
  ; RV32-NEXT:   PseudoRET implicit $v8m8
  ;
  ; RV64-LABEL: name: splat_zero_nxv8double
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(s64) = G_FCONSTANT double 0.000000e+00
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x s64>) = G_SPLAT_VECTOR [[C]](s64)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x s64>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 8 x double> zeroinitializer
}
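
; Pointer splats: null is a (p0) G_CONSTANT whose immediate is i32 on RV32
; and i64 on RV64, so the same element count can need a larger register
; class on RV64 (e.g. nxv2ptr returns in $v8 on RV32 but $v8m2 on RV64).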
define <vscale x 1 x ptr> @splat_zero_nxv1ptr() {
  ; RV32-LABEL: name: splat_zero_nxv1ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x p0>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv1ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 1 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV64-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 1 x p0>)
  ; RV64-NEXT:   PseudoRET implicit $v8
  ret <vscale x 1 x ptr> zeroinitializer
}

define <vscale x 2 x ptr> @splat_zero_nxv2ptr() {
  ; RV32-LABEL: name: splat_zero_nxv2ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV32-NEXT:   $v8 = COPY [[SPLAT_VECTOR]](<vscale x 2 x p0>)
  ; RV32-NEXT:   PseudoRET implicit $v8
  ;
  ; RV64-LABEL: name: splat_zero_nxv2ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 2 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV64-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 2 x p0>)
  ; RV64-NEXT:   PseudoRET implicit $v8m2
  ret <vscale x 2 x ptr> zeroinitializer
}

define <vscale x 4 x ptr> @splat_zero_nxv4ptr() {
  ; RV32-LABEL: name: splat_zero_nxv4ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV32-NEXT:   $v8m2 = COPY [[SPLAT_VECTOR]](<vscale x 4 x p0>)
  ; RV32-NEXT:   PseudoRET implicit $v8m2
  ;
  ; RV64-LABEL: name: splat_zero_nxv4ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 4 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV64-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 4 x p0>)
  ; RV64-NEXT:   PseudoRET implicit $v8m4
  ret <vscale x 4 x ptr> zeroinitializer
}

define <vscale x 8 x ptr> @splat_zero_nxv8ptr() {
  ; RV32-LABEL: name: splat_zero_nxv8ptr
  ; RV32: bb.1 (%ir-block.0):
  ; RV32-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i32 0
  ; RV32-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV32-NEXT:   $v8m4 = COPY [[SPLAT_VECTOR]](<vscale x 8 x p0>)
  ; RV32-NEXT:   PseudoRET implicit $v8m4
  ;
  ; RV64-LABEL: name: splat_zero_nxv8ptr
  ; RV64: bb.1 (%ir-block.0):
  ; RV64-NEXT:   [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
  ; RV64-NEXT:   [[SPLAT_VECTOR:%[0-9]+]]:_(<vscale x 8 x p0>) = G_SPLAT_VECTOR [[C]](p0)
  ; RV64-NEXT:   $v8m8 = COPY [[SPLAT_VECTOR]](<vscale x 8 x p0>)
  ; RV64-NEXT:   PseudoRET implicit $v8m8
  ret <vscale x 8 x ptr> zeroinitializer
}