; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv64 -mattr=+v | FileCheck %s
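
; Each test clamps a wider signed value to [0, 2^N-1] and truncates it to an
; N-bit element type. The CHECK lines expect this to lower to vmax.vx (clamp
; negatives to zero) followed by one vnclipu.wi unsigned narrowing clip per
; halving of the element width.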

define <4 x i8> @test_v4i16_v4i8(<4 x i16> %x) {
; CHECK-LABEL: test_v4i16_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i16> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i16>
  %c = icmp ult <4 x i16> %x, splat (i16 256)
  %d = select <4 x i1> %c, <4 x i16> %x, <4 x i16> %b
  %e = trunc <4 x i16> %d to <4 x i8>
  ret <4 x i8> %e
}

define <4 x i8> @test_v4i32_v4i8(<4 x i32> %x) {
; CHECK-LABEL: test_v4i32_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i32> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i32>
  %c = icmp ult <4 x i32> %x, splat (i32 256)
  %d = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %b
  %e = trunc <4 x i32> %d to <4 x i8>
  ret <4 x i8> %e
}

define <4 x i8> @test_v4i64_v4i8(<4 x i64> %x) {
; CHECK-LABEL: test_v4i64_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i64> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i64>
  %c = icmp ult <4 x i64> %x, splat (i64 256)
  %d = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %b
  %e = trunc <4 x i64> %d to <4 x i8>
  ret <4 x i8> %e
}

define <4 x i16> @test_v4i32_v4i16(<4 x i32> %x) {
; CHECK-LABEL: test_v4i32_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i32> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i32>
  %c = icmp ult <4 x i32> %x, splat (i32 65536)
  %d = select <4 x i1> %c, <4 x i32> %x, <4 x i32> %b
  %e = trunc <4 x i32> %d to <4 x i16>
  ret <4 x i16> %e
}

define <4 x i16> @test_v4i64_v4i16(<4 x i64> %x) {
; CHECK-LABEL: test_v4i64_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i64> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i64>
  %c = icmp ult <4 x i64> %x, splat (i64 65536)
  %d = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %b
  %e = trunc <4 x i64> %d to <4 x i16>
  ret <4 x i16> %e
}

define <4 x i32> @test_v4i64_v4i32(<4 x i64> %x) {
; CHECK-LABEL: test_v4i64_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vmax.vx v10, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <4 x i64> %x, zeroinitializer
  %b = sext <4 x i1> %a to <4 x i64>
  %c = icmp ult <4 x i64> %x, splat (i64 4294967296)
  %d = select <4 x i1> %c, <4 x i64> %x, <4 x i64> %b
  %e = trunc <4 x i64> %d to <4 x i32>
  ret <4 x i32> %e
}
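
; The same clamp-then-truncate pattern with scalable (vscale) vector types.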
define <vscale x 4 x i8> @test_nxv4i16_nxv4i8(<vscale x 4 x i16> %x) {
; CHECK-LABEL: test_nxv4i16_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i16> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i16>
  %c = icmp ult <vscale x 4 x i16> %x, splat (i16 256)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i16> %x, <vscale x 4 x i16> %b
  %e = trunc <vscale x 4 x i16> %d to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %e
}

define <vscale x 4 x i8> @test_nxv4i32_nxv4i8(<vscale x 4 x i32> %x) {
; CHECK-LABEL: test_nxv4i32_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v10, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i32> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
  %c = icmp ult <vscale x 4 x i32> %x, splat (i32 256)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> %b
  %e = trunc <vscale x 4 x i32> %d to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %e
}

define <vscale x 4 x i8> @test_nxv4i64_nxv4i8(<vscale x 4 x i64> %x) {
; CHECK-LABEL: test_nxv4i64_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnclipu.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v8, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i64> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i64>
  %c = icmp ult <vscale x 4 x i64> %x, splat (i64 256)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> %x, <vscale x 4 x i64> %b
  %e = trunc <vscale x 4 x i64> %d to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %e
}

define <vscale x 4 x i16> @test_nxv4i32_nxv4i16(<vscale x 4 x i32> %x) {
; CHECK-LABEL: test_nxv4i32_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vmax.vx v10, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v10, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i32> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i32>
  %c = icmp ult <vscale x 4 x i32> %x, splat (i32 65536)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i32> %x, <vscale x 4 x i32> %b
  %e = trunc <vscale x 4 x i32> %d to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %e
}

define <vscale x 4 x i16> @test_nxv4i64_nxv4i16(<vscale x 4 x i64> %x) {
; CHECK-LABEL: test_nxv4i64_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmax.vx v8, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnclipu.wi v12, v8, 0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v12, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i64> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i64>
  %c = icmp ult <vscale x 4 x i64> %x, splat (i64 65536)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> %x, <vscale x 4 x i64> %b
  %e = trunc <vscale x 4 x i64> %d to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %e
}

define <vscale x 4 x i32> @test_nxv4i64_nxv4i32(<vscale x 4 x i64> %x) {
; CHECK-LABEL: test_nxv4i64_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vmax.vx v12, v8, zero
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vnclipu.wi v8, v12, 0
; CHECK-NEXT:    ret
  %a = icmp sgt <vscale x 4 x i64> %x, zeroinitializer
  %b = sext <vscale x 4 x i1> %a to <vscale x 4 x i64>
  %c = icmp ult <vscale x 4 x i64> %x, splat (i64 4294967296)
  %d = select <vscale x 4 x i1> %c, <vscale x 4 x i64> %x, <vscale x 4 x i64> %b
  %e = trunc <vscale x 4 x i64> %d to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %e
}