; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64

; Test that vector shifts are converted to proper SSE2 vector shifts when the
; shift amounts are the same across all lanes.
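;
; Uniform constant amounts should select the immediate shift forms (e.g.
; psllq $32), and splatted variable amounts should select the xmm-count forms
; (e.g. psllq %xmm1), as the CHECK lines below verify.
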
define void @shift1a(<2 x i64> %val, ptr %dst) nounwind {
; X86-LABEL: shift1a:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: psllq $32, %xmm0
; X86-NEXT: movdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shift1a:
; X64: # %bb.0: # %entry
; X64-NEXT: psllq $32, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %shl = shl <2 x i64> %val, < i64 32, i64 32 >
  store <2 x i64> %shl, ptr %dst
  ret void
}

define void @shift1b(<2 x i64> %val, ptr %dst, i64 %amt) nounwind {
; X86-LABEL: shift1b:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; X86-NEXT: psllq %xmm1, %xmm0
; X86-NEXT: movdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shift1b:
; X64: # %bb.0: # %entry
; X64-NEXT: movq %rsi, %xmm1
; X64-NEXT: psllq %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
  %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
  %shl = shl <2 x i64> %val, %1
  store <2 x i64> %shl, ptr %dst
  ret void
}

define void @shift2a(<4 x i32> %val, ptr %dst) nounwind {
; X86-LABEL: shift2a:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: pslld $5, %xmm0
; X86-NEXT: movdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shift2a:
; X64: # %bb.0: # %entry
; X64-NEXT: pslld $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %shl = shl <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
  store <4 x i32> %shl, ptr %dst
  ret void
}

define void @shift2b(<4 x i32> %val, ptr %dst, i32 %amt) nounwind {
; X86-LABEL: shift2b:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: pslld %xmm1, %xmm0
; X86-NEXT: movdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shift2b:
; X64: # %bb.0: # %entry
; X64-NEXT: movd %esi, %xmm1
; X64-NEXT: pslld %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
  %shl = shl <4 x i32> %val, %3
  store <4 x i32> %shl, ptr %dst
  ret void
}

define void @shift3a(<8 x i16> %val, ptr %dst) nounwind {
; X86-LABEL: shift3a:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: psllw $5, %xmm0
; X86-NEXT: movdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shift3a:
; X64: # %bb.0: # %entry
; X64-NEXT: psllw $5, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %shl = shl <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
  store <8 x i16> %shl, ptr %dst
  ret void
}

; Make sure the shift amount is properly zero-extended.
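; (psllw shifts by the count held in the low 64 bits of its source operand, so
; the upper bits of the widened i16 amount must be cleared first; this is what
; the movzwl below is for.)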
define void @shift3b(<8 x i16> %val, ptr %dst, i16 %amt) nounwind {
; X86-LABEL: shift3b:
; X86: # %bb.0: # %entry
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movd %ecx, %xmm1
; X86-NEXT: psllw %xmm1, %xmm0
; X86-NEXT: movdqa %xmm0, (%eax)
; X86-NEXT: retl
;
; X64-LABEL: shift3b:
; X64: # %bb.0: # %entry
; X64-NEXT: movzwl %si, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psllw %xmm1, %xmm0
; X64-NEXT: movdqa %xmm0, (%rdi)
; X64-NEXT: retq
entry:
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %shl = shl <8 x i16> %val, %7
  store <8 x i16> %shl, ptr %dst
  ret void
}