1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=bdver2 | FileCheck %s --check-prefixes=CHECK,XOP,XOPAVX1
3 ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=bdver4 | FileCheck %s --check-prefixes=CHECK,XOP,XOPAVX2
4 ; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 | FileCheck %s --check-prefixes=CHECK,AVX512
; Uniform rotate: (x >> 1) | (x << 31) is a splat rotate of each i32 lane.
; XOP matches it to a single vprotd $31; AVX-512 to a single vprold $31.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
6 define <4 x i32> @rot_v4i32_splat(<4 x i32> %x) {
7 ; XOP-LABEL: rot_v4i32_splat:
9 ; XOP-NEXT: vprotd $31, %xmm0, %xmm0
12 ; AVX512-LABEL: rot_v4i32_splat:
14 ; AVX512-NEXT: vprold $31, %xmm0, %xmm0
16 %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
17 %2 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
18 %3 = or <4 x i32> %1, %2
; Per-lane rotate amounts (shl amt + lshr amt == 32 in every lane).
; XOP still uses vprotd with a constant-pool vector of amounts; AVX-512 uses
; the variable-rotate form vprolvd.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
22 define <4 x i32> @rot_v4i32_non_splat(<4 x i32> %x) {
23 ; XOP-LABEL: rot_v4i32_non_splat:
25 ; XOP-NEXT: vprotd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
28 ; AVX512-LABEL: rot_v4i32_non_splat:
30 ; AVX512-NEXT: vprolvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
32 %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
33 %2 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
34 %3 = or <4 x i32> %1, %2
; Splat rotate where each shifted half is masked before the final or.
; Expected lowering per the checks: the rotate still forms (vprotd/vprold $31)
; and the two masks combine into a single and afterwards.  On AVX-512 the
; combined mask is folded as a 64-bit broadcast operand (vpandq ...{1to2}).
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
38 define <4 x i32> @rot_v4i32_splat_2masks(<4 x i32> %x) {
39 ; XOP-LABEL: rot_v4i32_splat_2masks:
41 ; XOP-NEXT: vprotd $31, %xmm0, %xmm0
42 ; XOP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
45 ; AVX512-LABEL: rot_v4i32_splat_2masks:
47 ; AVX512-NEXT: vprold $31, %xmm0, %xmm0
48 ; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
50 %1 = lshr <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
51 %2 = and <4 x i32> %1, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
53 %3 = shl <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
54 %4 = and <4 x i32> %3, <i32 0, i32 4294901760, i32 0, i32 4294901760>
55 %5 = or <4 x i32> %2, %4
; Same two-mask pattern as above but with non-splat rotate amounts.
; Per the checks this still lowers to one rotate (vprotd / vprolvd) followed
; by a single and of the combined mask.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
59 define <4 x i32> @rot_v4i32_non_splat_2masks(<4 x i32> %x) {
60 ; XOP-LABEL: rot_v4i32_non_splat_2masks:
62 ; XOP-NEXT: vprotd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
63 ; XOP-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
66 ; AVX512-LABEL: rot_v4i32_non_splat_2masks:
68 ; AVX512-NEXT: vprolvd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
69 ; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
71 %1 = lshr <4 x i32> %x, <i32 1, i32 2, i32 3, i32 4>
72 %2 = and <4 x i32> %1, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
74 %3 = shl <4 x i32> %x, <i32 31, i32 30, i32 29, i32 28>
75 %4 = and <4 x i32> %3, <i32 0, i32 4294901760, i32 0, i32 4294901760>
76 %5 = or <4 x i32> %2, %4
; Only lane 0 of the rotate result survives the splat shufflevector, and lane 0
; is rotated by 0, so per the checks the whole sequence folds to a broadcast of
; lane 0 with no rotate instruction emitted (vshufps on XOPAVX1, vbroadcastss
; on XOPAVX2/AVX512).
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
80 define <4 x i32> @rot_v4i32_zero_non_splat(<4 x i32> %x) {
81 ; XOPAVX1-LABEL: rot_v4i32_zero_non_splat:
83 ; XOPAVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
86 ; XOPAVX2-LABEL: rot_v4i32_zero_non_splat:
88 ; XOPAVX2-NEXT: vbroadcastss %xmm0, %xmm0
91 ; AVX512-LABEL: rot_v4i32_zero_non_splat:
93 ; AVX512-NEXT: vbroadcastss %xmm0, %xmm0
95 %1 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 0, i32 1, i32 2, i32 3>)
96 %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
; Rotating a value whose lanes are all sign bits (ashr by 31 gives all-zeros or
; all-ones per lane) is a no-op, so per the common check only the vpsrad $31
; remains and the variable rotate is eliminated on every target.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
100 define <4 x i32> @rot_v4i32_allsignbits(<4 x i32> %x, <4 x i32> %y) {
101 ; CHECK-LABEL: rot_v4i32_allsignbits:
103 ; CHECK-NEXT: vpsrad $31, %xmm0, %xmm0
105 %1 = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
106 %2 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %1, <4 x i32> %1, <4 x i32> %y)
; ashr, rotate-by-1, ashr, then a sign-extending mask: per the checks the
; rotate is optimized away and the lowering is a single per-lane arithmetic
; shift (vpshad on XOPAVX1, vpsravd otherwise) plus one and.  AVX-512 folds
; the mask as a 64-bit broadcast operand (vpandq ...{1to2}).
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
110 define <4 x i32> @rot_v4i32_mask_ashr0(<4 x i32> %a0) {
111 ; XOPAVX1-LABEL: rot_v4i32_mask_ashr0:
113 ; XOPAVX1-NEXT: vpshad {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
114 ; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
117 ; XOPAVX2-LABEL: rot_v4i32_mask_ashr0:
119 ; XOPAVX2-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
120 ; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
123 ; AVX512-LABEL: rot_v4i32_mask_ashr0:
125 ; AVX512-NEXT: vpsravd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
126 ; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
128 %1 = ashr <4 x i32> %a0, <i32 25, i32 26, i32 27, i32 28>
129 %2 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
130 %3 = ashr <4 x i32> %2, <i32 1, i32 2, i32 3, i32 4>
131 %4 = and <4 x i32> %3, <i32 -32768, i32 -65536, i32 -32768, i32 -65536>
; Like rot_v4i32_mask_ashr0 but with a splat shuffle of lane 0 in the middle:
; per the checks everything collapses to vpsrad $25 of lane 0, a broadcast
; (vpshufd / vpbroadcastd), and one mask and — no rotate instruction.  Note the
; differing shift/broadcast order between XOPAVX1 and the AVX2/AVX512 outputs.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
135 define <4 x i32> @rot_v4i32_mask_ashr1(<4 x i32> %a0) {
136 ; XOPAVX1-LABEL: rot_v4i32_mask_ashr1:
138 ; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
139 ; XOPAVX1-NEXT: vpsrad $25, %xmm0, %xmm0
140 ; XOPAVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
143 ; XOPAVX2-LABEL: rot_v4i32_mask_ashr1:
145 ; XOPAVX2-NEXT: vpsrad $25, %xmm0, %xmm0
146 ; XOPAVX2-NEXT: vpbroadcastd %xmm0, %xmm0
147 ; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
150 ; AVX512-LABEL: rot_v4i32_mask_ashr1:
152 ; AVX512-NEXT: vpsrad $25, %xmm0, %xmm0
153 ; AVX512-NEXT: vpbroadcastd %xmm0, %xmm0
154 ; AVX512-NEXT: vpandq {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to2}, %xmm0, %xmm0
156 %1 = ashr <4 x i32> %a0, <i32 25, i32 26, i32 27, i32 28>
157 %2 = call <4 x i32> @llvm.fshl.v4i32(<4 x i32> %1, <4 x i32> %1, <4 x i32> <i32 1, i32 2, i32 3, i32 4>)
158 %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> zeroinitializer
159 %4 = ashr <4 x i32> %3, <i32 1, i32 2, i32 3, i32 4>
160 %5 = and <4 x i32> %4, <i32 -4096, i32 -8192, i32 -4096, i32 -8192>
; or(shl(or(y,x),5), lshr(x,11)) — the i16 funnel-shift-shaped pattern.
; Per the checks no single funnel/rotate instruction is formed on any of these
; targets; the lowering stays as por + vpsllw/vpsrlw + por (instruction order
; differs slightly between XOPAVX1 and the AVX2/AVX512 outputs).
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
164 define <8 x i16> @or_fshl_v8i16(<8 x i16> %x, <8 x i16> %y) {
165 ; XOPAVX1-LABEL: or_fshl_v8i16:
167 ; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm1
168 ; XOPAVX1-NEXT: vpsrlw $11, %xmm0, %xmm0
169 ; XOPAVX1-NEXT: vpsllw $5, %xmm1, %xmm1
170 ; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
173 ; XOPAVX2-LABEL: or_fshl_v8i16:
175 ; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm1
176 ; XOPAVX2-NEXT: vpsllw $5, %xmm1, %xmm1
177 ; XOPAVX2-NEXT: vpsrlw $11, %xmm0, %xmm0
178 ; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
181 ; AVX512-LABEL: or_fshl_v8i16:
183 ; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm1
184 ; AVX512-NEXT: vpsllw $5, %xmm1, %xmm1
185 ; AVX512-NEXT: vpsrlw $11, %xmm0, %xmm0
186 ; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
188 %or1 = or <8 x i16> %y, %x
189 %sh1 = shl <8 x i16> %or1, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
190 %sh2 = lshr <8 x i16> %x, <i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11, i16 11>
191 %r = or <8 x i16> %sh2, %sh1
; Same or-of-funnel-halves pattern as or_fshl_v8i16, at i32 with 21/11 shifts.
; Per the checks this also remains vpor + vpslld/vpsrld + vpor on all targets
; rather than collapsing to a single funnel-shift/rotate instruction.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
195 define <4 x i32> @or_fshl_v4i32(<4 x i32> %x, <4 x i32> %y) {
196 ; XOPAVX1-LABEL: or_fshl_v4i32:
198 ; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm1
199 ; XOPAVX1-NEXT: vpsrld $11, %xmm0, %xmm0
200 ; XOPAVX1-NEXT: vpslld $21, %xmm1, %xmm1
201 ; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
204 ; XOPAVX2-LABEL: or_fshl_v4i32:
206 ; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm1
207 ; XOPAVX2-NEXT: vpslld $21, %xmm1, %xmm1
208 ; XOPAVX2-NEXT: vpsrld $11, %xmm0, %xmm0
209 ; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
212 ; AVX512-LABEL: or_fshl_v4i32:
214 ; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm1
215 ; AVX512-NEXT: vpslld $21, %xmm1, %xmm1
216 ; AVX512-NEXT: vpsrld $11, %xmm0, %xmm0
217 ; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
219 %or1 = or <4 x i32> %y, %x
220 %sh1 = shl <4 x i32> %or1, <i32 21, i32 21, i32 21, i32 21>
221 %sh2 = lshr <4 x i32> %x, <i32 11, i32 11, i32 11, i32 11>
222 %r = or <4 x i32> %sh2, %sh1
; shl(x,42) | lshr(or(x,y),22) at i64: the shift amounts sum to 64, so the
; x-only contribution forms a rotate.  Per the checks the lowering is a single
; rotate of x (vprotq on XOP, vprolq on AVX-512) plus vpsrlq of y and a vpor.
; NOTE(review): fragment appears truncated — ret/closing brace not visible here.
226 define <2 x i64> @or_fshr_v2i64(<2 x i64> %x, <2 x i64> %y) {
227 ; XOP-LABEL: or_fshr_v2i64:
229 ; XOP-NEXT: vpsrlq $22, %xmm1, %xmm1
230 ; XOP-NEXT: vprotq $42, %xmm0, %xmm0
231 ; XOP-NEXT: vpor %xmm1, %xmm0, %xmm0
234 ; AVX512-LABEL: or_fshr_v2i64:
236 ; AVX512-NEXT: vpsrlq $22, %xmm1, %xmm1
237 ; AVX512-NEXT: vprolq $42, %xmm0, %xmm0
238 ; AVX512-NEXT: vpor %xmm1, %xmm0, %xmm0
240 %or1 = or <2 x i64> %x, %y
241 %sh1 = shl <2 x i64> %x, <i64 42, i64 42>
242 %sh2 = lshr <2 x i64> %or1, <i64 22, i64 22>
243 %r = or <2 x i64> %sh1, %sh2
247 declare <4 x i32> @llvm.fshl.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)