1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s
4 ; Test that the pshufb mask comment is correct.
; Basic case: every mask byte is in [0,15] with bit 7 clear, so the asm
; comment must echo the constant's byte indices verbatim.
6 define <16 x i8> @test1(<16 x i8> %V) {
9 ; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,0,0,0,2,0,0,0,0,3,0,0,0,0,4]
11 %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 1, i8 0, i8 0, i8 0, i8 0, i8 2, i8 0, i8 0, i8 0, i8 0, i8 3, i8 0, i8 0, i8 0, i8 0, i8 4>)
15 ; Test that indexes larger than the size of the vector are shown masked (bottom 4 bits).
; Indices >= 16 (here 16, 17, 50) must be printed masked to their low 4 bits
; (16 -> 0, 17 -> 1, 50 -> 2), matching PSHUFB's index selection.
17 define <16 x i8> @test2(<16 x i8> %V) {
20 ; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[15,0,0,0,0,0,0,0,0,0,1,0,0,0,0,2]
22 %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 15, i8 0, i8 0, i8 0, i8 0, i8 16, i8 0, i8 0, i8 0, i8 0, i8 17, i8 0, i8 0, i8 0, i8 0, i8 50>)
26 ; Test that indexes with bit seven set are shown as zero.
; Bit 7 of a mask byte means "write zero": 128 and 255 print as "zero",
; while 127 (bit 7 clear) prints as its low-4-bit index 15.
28 define <16 x i8> @test3(<16 x i8> %V) {
31 ; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[1,0,0,15,0,2,0,0],zero,xmm0[0,3,0,0],zero,xmm0[0,4]
33 %1 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> <i8 1, i8 0, i8 0, i8 127, i8 0, i8 2, i8 0, i8 0, i8 128, i8 0, i8 3, i8 0, i8 0, i8 255, i8 0, i8 4>)
37 ; Test that we won't crash when the constant was reused for another instruction.
; The shuffle-mask constant is also stored to %P, so the constant pool entry
; has two users; mask-comment printing must not crash on the shared constant.
; The mask <8,9,...,0,1> here lowers to the pshufd word shuffle [2,3,0,1].
39 define <16 x i8> @test4(<16 x i8> %V, ptr %P) {
42 ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1084818905618843912,506097522914230528]
43 ; CHECK-NEXT: movaps %xmm1, (%rdi)
44 ; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
46 %1 = insertelement <2 x i64> undef, i64 1084818905618843912, i32 0
47 %2 = insertelement <2 x i64> %1, i64 506097522914230528, i32 1
48 store <2 x i64> %2, ptr %P, align 16
49 %3 = bitcast <2 x i64> %2 to <16 x i8>
50 %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> %3)
; The mask is not a constant here: it is loaded back from memory, so the
; pshufb takes a memory operand and no mask comment can be printed.
54 define <16 x i8> @test5(<16 x i8> %V) {
57 ; CHECK-NEXT: movss {{.*#+}} xmm1 = [1,0,0,0]
58 ; CHECK-NEXT: movaps %xmm1, (%rax)
59 ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [1,1]
60 ; CHECK-NEXT: movaps %xmm1, (%rax)
61 ; CHECK-NEXT: pshufb (%rax), %xmm0
63 store <2 x i64> <i64 1, i64 0>, ptr undef, align 16
64 %l = load <2 x i64>, ptr undef, align 16
65 %shuffle = shufflevector <2 x i64> %l, <2 x i64> undef, <2 x i32> zeroinitializer
66 store <2 x i64> %shuffle, ptr undef, align 16
67 %1 = load <16 x i8>, ptr undef, align 16
68 %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> %1)
72 ; Test for a reused constant that would allow the pshufb to combine to a simpler instruction.
; Same reused-constant setup as test4, but with a mask whose pattern lets
; the shuffle combine to the simpler punpcklbw; the store of the original
; constant must survive the combine.
74 define <16 x i8> @test6(<16 x i8> %V, ptr %P) {
77 ; CHECK-NEXT: movaps {{.*#+}} xmm1 = [217019414673948672,506380106026255364]
78 ; CHECK-NEXT: movaps %xmm1, (%rdi)
79 ; CHECK-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
81 %1 = insertelement <2 x i64> undef, i64 217019414673948672, i32 0
82 %2 = insertelement <2 x i64> %1, i64 506380106026255364, i32 1
83 store <2 x i64> %2, ptr %P, align 16
84 %3 = bitcast <2 x i64> %2 to <16 x i8>
85 %4 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %V, <16 x i8> %3)
; SSSE3 PSHUFB intrinsic: byte shuffle of the first operand using the second
; operand as the per-byte index/zeroing mask.
89 declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>) nounwind readnone