; These are tests for SSE3 codegen. Yonah has SSE3 and earlier but not SSSE3+.

; RUN: llc < %s -march=x86-64 -mcpu=yonah -mtriple=i686-apple-darwin9 \
; RUN:   | FileCheck %s --check-prefix=X64

; Test for v8xi16 lowering where we extract the first element of the vector and
; place it in the second element of the result.

define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
  %tmp3 = load <8 x i16>* %old
  %tmp6 = shufflevector <8 x i16> %tmp3,
                <8 x i16> < i16 0, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef >,
                <8 x i32> < i32 8, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
  store <8 x i16> %tmp6, <8 x i16>* %dest
  ret void

; X64: movddup (%rsi), %xmm0
; X64: pshuflw $0, %xmm0, %xmm0
; X64: xorl %eax, %eax
; X64: pinsrw $0, %eax, %xmm0
; X64: movaps %xmm0, (%rdi)
}

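; The mask <8,1,2,3,4,5,6,7> takes element 0 from %B and the remaining seven
; words from %A, i.e. a single-word insert into the low element.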
define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> < i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
  ret <8 x i16> %tmp3

; X64: movl (%rsi), %eax
; X64: movaps (%rdi), %xmm0
; X64: pinsrw $0, %eax, %xmm0
}

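; Word 1 of %B is duplicated into elements 0 and 3 of the result; everything
; else comes from %A.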
define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
  %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 9, i32 1, i32 2, i32 9, i32 4, i32 5, i32 6, i32 7 >
  ret <8 x i16> %tmp

; X64: pextrw $1, %xmm1, %eax
; X64: pinsrw $0, %eax, %xmm0
; X64: pinsrw $3, %eax, %xmm0
}

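; Both shuffle operands are %A, so mask indices 8 and 13 also select from %A
; (its elements 0 and 5).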
define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
  %tmp = shufflevector <8 x i16> %A, <8 x i16> %A, <8 x i32> < i32 8, i32 3, i32 2, i32 13, i32 7, i32 6, i32 5, i32 4 >
  ret <8 x i16> %tmp

; X64: pextrw $5, %xmm0, %eax
; X64: pshuflw $44, %xmm0, %xmm0
; X64: pshufhw $27, %xmm0, %xmm0
; X64: pinsrw $3, %eax, %xmm0
}

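; Every mask index selects from %A, so %B is unused; word 7 moves into the low
; half and word 1 into the high half.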
define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
  %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 7, i32 2, i32 3, i32 1, i32 5, i32 6, i32 5 >
  ret <8 x i16> %tmp

; X64: pextrw $7, %xmm0, %eax
; X64: pshufhw $100, %xmm0, %xmm1
; X64: pinsrw $1, %eax, %xmm1
; X64: pextrw $1, %xmm0, %eax
; X64: movaps %xmm1, %xmm0
; X64: pinsrw $4, %eax, %xmm0
}

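; Viewed as 32-bit elements the result is <B0, A0, B1, A1>, an interleave of
; the low halves of %B and %A.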
define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
  %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 0, i32 1, i32 10, i32 11, i32 2, i32 3 >
  ret <8 x i16> %tmp

; X64: movlhps %xmm1, %xmm0
; X64: pshufd $114, %xmm0, %xmm0
}

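; Only the low two words (one dword) come from %B, so this reduces to a scalar
; movss of %B into %A.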
define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
  %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
  ret <8 x i16> %tmp

; X64: movss %xmm1, %xmm0
}

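; A single-source word permutation of %A that fits a pshuflw/pshufhw pair.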
define <8 x i16> @t7(<8 x i16> %A, <8 x i16> %B) nounwind {
  %tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 0, i32 3, i32 2, i32 4, i32 6, i32 4, i32 7 >
  ret <8 x i16> %tmp

; X64: pshuflw $-80, %xmm0, %xmm0
; X64: pshufhw $-56, %xmm0, %xmm0
}

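; The extract/insert chain below is really the word permutation
; <2,1,0,3,6,5,4,7>, which should fold into pshuflw/pshufhw on the loaded value.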
define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
  %tmp = load <2 x i64>* %A
  %tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>
  %tmp0 = extractelement <8 x i16> %tmp.upgrd.1, i32 0
  %tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1
  %tmp2 = extractelement <8 x i16> %tmp.upgrd.1, i32 2
  %tmp3 = extractelement <8 x i16> %tmp.upgrd.1, i32 3
  %tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 4
  %tmp5 = extractelement <8 x i16> %tmp.upgrd.1, i32 5
  %tmp6 = extractelement <8 x i16> %tmp.upgrd.1, i32 6
  %tmp7 = extractelement <8 x i16> %tmp.upgrd.1, i32 7
  %tmp8 = insertelement <8 x i16> undef, i16 %tmp2, i32 0
  %tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 1
  %tmp10 = insertelement <8 x i16> %tmp9, i16 %tmp0, i32 2
  %tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 3
  %tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp6, i32 4
  %tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 5
  %tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp4, i32 6
  %tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 7
  %tmp15.upgrd.2 = bitcast <8 x i16> %tmp15 to <2 x i64>
  store <2 x i64> %tmp15.upgrd.2, <2 x i64>* %res
  ret void

; X64: pshuflw $-58, (%rsi), %xmm0
; X64: pshufhw $-58, %xmm0, %xmm0
; X64: movaps %xmm0, (%rdi)
}

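; Load 64 bits through %A (as a double) and place them in the high half of the
; <4 x float> stored back to %r.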
define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
  %tmp = load <4 x float>* %r
  %tmp.upgrd.3 = bitcast <2 x i32>* %A to double*
  %tmp.upgrd.4 = load double* %tmp.upgrd.3
  %tmp.upgrd.5 = insertelement <2 x double> undef, double %tmp.upgrd.4, i32 0
  %tmp5 = insertelement <2 x double> %tmp.upgrd.5, double undef, i32 1
  %tmp6 = bitcast <2 x double> %tmp5 to <4 x float>
  %tmp.upgrd.6 = extractelement <4 x float> %tmp, i32 0
  %tmp7 = extractelement <4 x float> %tmp, i32 1
  %tmp8 = extractelement <4 x float> %tmp6, i32 0
  %tmp9 = extractelement <4 x float> %tmp6, i32 1
  %tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.6, i32 0
  %tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1
  %tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2
  %tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3
  store <4 x float> %tmp13, <4 x float>* %r
  ret void

; X64: movsd (%rsi), %xmm0
; X64: movhps %xmm0, (%rdi)
}

; FIXME: This testcase produces icky code. It can be made much better!

@g1 = external constant <4 x i32>
@g2 = external constant <4 x i16>

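; Truncate the four i32 elements loaded from @g1 to i16 and store the packed
; <4 x i16> result to @g2.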
define internal void @t10() nounwind {
  load <4 x i32>* @g1, align 16
  bitcast <4 x i32> %1 to <8 x i16>
  shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> < i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef >
  bitcast <8 x i16> %3 to <2 x i64>
  extractelement <2 x i64> %4, i32 0
  bitcast i64 %5 to <4 x i16>
  store <4 x i16> %6, <4 x i16>* @g2, align 8
  ret void

; X64: movq _g1@GOTPCREL(%rip), %rax
; X64: movaps (%rax), %xmm0
; X64: pextrw $4, %xmm0, %eax
; X64: movaps %xmm0, %xmm1
; X64: movlhps %xmm1, %xmm1
; X64: pshuflw $8, %xmm1, %xmm1
; X64: pinsrw $2, %eax, %xmm1
; X64: pextrw $6, %xmm0, %eax
; X64: pinsrw $3, %eax, %xmm1
; X64: movq _g2@GOTPCREL(%rip), %rax
; X64: movq %xmm1, (%rax)
}

; Pack various elements via shuffles.
define <8 x i16> @t11(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
  %tmp7 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
  ret <8 x i16> %tmp7

; X64: movd %xmm1, %eax
; X64: movlhps %xmm0, %xmm0
; X64: pshuflw $1, %xmm0, %xmm0
; X64: pinsrw $1, %eax, %xmm0
}

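; Elements 0-1 come from %T0; element 4 is %T0's word 3 and element 5 is %T1's
; word 3.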
define <8 x i16> @t12(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
  %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 1, i32 undef, i32 undef, i32 3, i32 11, i32 undef, i32 undef >
  ret <8 x i16> %tmp9

; X64: pextrw $3, %xmm1, %eax
; X64: movlhps %xmm0, %xmm0
; X64: pshufhw $3, %xmm0, %xmm0
; X64: pinsrw $5, %eax, %xmm0
}

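; Elements 0-1 come from %T1; element 4 is %T1's word 3 and element 5 is %T0's
; word 3.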
define <8 x i16> @t13(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
  %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 11, i32 3, i32 undef, i32 undef >
  ret <8 x i16> %tmp9

; X64: punpcklqdq %xmm0, %xmm1
; X64: pextrw $3, %xmm1, %eax
; X64: pshufd $52, %xmm1, %xmm0
; X64: pinsrw $4, %eax, %xmm0
}

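; Elements 0-1 come from %T1 and element 5 is %T0's word 2; after the
; punpcklqdq a single pshufhw is enough.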
define <8 x i16> @t14(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
  %tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 2, i32 undef, i32 undef >
  ret <8 x i16> %tmp9

; X64: punpcklqdq %xmm0, %xmm1
; X64: pshufhw $8, %xmm1, %xmm0
}

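; Element 2 is %T0's word 7, element 3 is %T0's word 2, and element 4 is %T1's
; word 0.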
define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
  %tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef, i32 undef >
  ret <8 x i16> %tmp8

; X64: pextrw $7, %xmm0, %eax
; X64: punpcklqdq %xmm1, %xmm0
; X64: pshuflw $-128, %xmm0, %xmm0
; X64: pinsrw $2, %eax, %xmm0
}

; Test Yonah, where we convert a shuffle to pextrw and pinsrw.
define <16 x i8> @t16(<16 x i8> %T0) nounwind readnone {
  %tmp8 = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
  %tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
  ret <16 x i8> %tmp9

; X64: movaps LCPI17_0(%rip), %xmm1
; X64: movd %xmm1, %eax
; X64: pinsrw $0, %eax, %xmm1
; X64: pextrw $8, %xmm0, %eax
; X64: pinsrw $1, %eax, %xmm1
; X64: pextrw $1, %xmm1, %ecx
; X64: movd %xmm1, %edx
; X64: pinsrw $0, %edx, %xmm1
; X64: movzbl %cl, %ecx
; X64: andw $-256, %ax
; X64: movaps %xmm1, %xmm0
; X64: pinsrw $1, %eax, %xmm0
}