; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s
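
; Each unpack below uses only half of each hadd/hsub result, so it should fold
; to a single horizontal op on the live operands plus an in-lane shuffle.
; unpacklo(hadd(%0,%1), hadd(%2,%3)) only needs the sums of %0 and %2.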
define <8 x float> @test_unpacklo_hadd_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
; CHECK-LABEL: test_unpacklo_hadd_v8f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vhaddps %ymm2, %ymm0, %ymm0
; CHECK-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %0, <8 x float> %1) #4
  %6 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %2, <8 x float> %3) #4
  %7 = shufflevector <8 x float> %5, <8 x float> %6, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
  ret <8 x float> %7
}

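; unpackhi only needs the sums of the second operands, %1 and %3.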
define <8 x float> @test_unpackhi_hadd_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
; CHECK-LABEL: test_unpackhi_hadd_v8f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vhaddps %ymm3, %ymm1, %ymm0
; CHECK-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %0, <8 x float> %1) #4
  %6 = tail call <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float> %2, <8 x float> %3) #4
  %7 = shufflevector <8 x float> %5, <8 x float> %6, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
  ret <8 x float> %7
}

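; Same fold for hsub: unpacklo keeps the differences of %0 and %2.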
define <8 x float> @test_unpacklo_hsub_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
; CHECK-LABEL: test_unpacklo_hsub_v8f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vhsubps %ymm2, %ymm0, %ymm0
; CHECK-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %0, <8 x float> %1) #4
  %6 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %2, <8 x float> %3) #4
  %7 = shufflevector <8 x float> %5, <8 x float> %6, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
  ret <8 x float> %7
}

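; unpackhi keeps the differences of %1 and %3.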
define <8 x float> @test_unpackhi_hsub_v8f32(<8 x float> %0, <8 x float> %1, <8 x float> %2, <8 x float> %3) {
; CHECK-LABEL: test_unpackhi_hsub_v8f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vhsubps %ymm3, %ymm1, %ymm0
; CHECK-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %0, <8 x float> %1) #4
  %6 = tail call <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float> %2, <8 x float> %3) #4
  %7 = shufflevector <8 x float> %5, <8 x float> %6, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
  ret <8 x float> %7
}

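; The v8i32 versions should fold the same way, using vphaddd/vphsubd and vpshufd.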
define <8 x i32> @test_unpacklo_hadd_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
; CHECK-LABEL: test_unpacklo_hadd_v8i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vphaddd %ymm2, %ymm0, %ymm0
; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %0, <8 x i32> %1) #5
  %6 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %2, <8 x i32> %3) #5
  %7 = shufflevector <8 x i32> %5, <8 x i32> %6, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
  ret <8 x i32> %7
}

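; unpackhi variant of the integer hadd fold.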
define <8 x i32> @test_unpackhi_hadd_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
; CHECK-LABEL: test_unpackhi_hadd_v8i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vphaddd %ymm3, %ymm1, %ymm0
; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %0, <8 x i32> %1) #5
  %6 = tail call <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32> %2, <8 x i32> %3) #5
  %7 = shufflevector <8 x i32> %5, <8 x i32> %6, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
  ret <8 x i32> %7
}

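; unpacklo variant of the integer hsub fold.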
define <8 x i32> @test_unpacklo_hsub_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
; CHECK-LABEL: test_unpacklo_hsub_v8i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vphsubd %ymm2, %ymm0, %ymm0
; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %0, <8 x i32> %1) #5
  %6 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %2, <8 x i32> %3) #5
  %7 = shufflevector <8 x i32> %5, <8 x i32> %6, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 4, i32 12, i32 5, i32 13>
  ret <8 x i32> %7
}

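; unpackhi variant of the integer hsub fold.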
define <8 x i32> @test_unpackhi_hsub_v8i32(<8 x i32> %0, <8 x i32> %1, <8 x i32> %2, <8 x i32> %3) {
; CHECK-LABEL: test_unpackhi_hsub_v8i32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vphsubd %ymm3, %ymm1, %ymm0
; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7]
; CHECK-NEXT:    ret{{[l|q]}}
  %5 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %0, <8 x i32> %1) #5
  %6 = tail call <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32> %2, <8 x i32> %3) #5
  %7 = shufflevector <8 x i32> %5, <8 x i32> %6, <8 x i32> <i32 2, i32 10, i32 3, i32 11, i32 6, i32 14, i32 7, i32 15>
  ret <8 x i32> %7
}

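; PR49971: splatting the low 128 bits of hadd(x,x) should lower to a 128-bit
; vhaddpd plus a vbroadcastsd of the resulting scalar sum.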
define <4 x double> @PR49971(<4 x double> %0) {
; CHECK-LABEL: PR49971:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vbroadcastsd %xmm0, %ymm0
; CHECK-NEXT:    ret{{[l|q]}}
  %2 = tail call <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double> %0, <4 x double> %0)
  %3 = shufflevector <4 x double> %2, <4 x double> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
  ret <4 x double> %3
}

declare <8 x float> @llvm.x86.avx.hadd.ps.256(<8 x float>, <8 x float>)
declare <8 x float> @llvm.x86.avx.hsub.ps.256(<8 x float>, <8 x float>)
declare <4 x double> @llvm.x86.avx.hadd.pd.256(<4 x double>, <4 x double>)
declare <4 x double> @llvm.x86.avx.hsub.pd.256(<4 x double>, <4 x double>)

declare <16 x i16> @llvm.x86.avx2.phadd.w(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.x86.avx2.phadd.d(<8 x i32>, <8 x i32>)
declare <16 x i16> @llvm.x86.avx2.phsub.w(<16 x i16>, <16 x i16>)
declare <8 x i32> @llvm.x86.avx2.phsub.d(<8 x i32>, <8 x i32>)

declare <32 x i8> @llvm.x86.avx2.packsswb(<16 x i16>, <16 x i16>)
declare <16 x i16> @llvm.x86.avx2.packssdw(<8 x i32>, <8 x i32>)
declare <32 x i8> @llvm.x86.avx2.packuswb(<16 x i16>, <16 x i16>)
declare <16 x i16> @llvm.x86.avx2.packusdw(<8 x i32>, <8 x i32>)