1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+sse2 -S | FileCheck %s --check-prefix=SSE
3 ; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx -S | FileCheck %s --check-prefix=AVX
4 ; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx2 -S | FileCheck %s --check-prefix=AVX
5 ; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx512f -S | FileCheck %s --check-prefix=AVX
6 ; RUN: opt < %s -slp-threshold=-6 -passes=slp-vectorizer,instcombine -mattr=+avx512vl -S | FileCheck %s --check-prefix=AVX
8 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
9 target triple = "x86_64-unknown-linux-gnu"
11 ; These tests ensure that we do not regress due to PR31243. Note that we set
12 ; the SLP threshold to force vectorization even when not profitable.
14 ; When computing minimum sizes, if we can prove the sign bit is zero, we can
15 ; zero-extend the roots back to their original sizes.
17 define i8 @PR31243_zext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
18 ; SSE-LABEL: @PR31243_zext(
20 ; SSE-NEXT: [[TMP0:%.*]] = or i8 [[V0:%.*]], 1
21 ; SSE-NEXT: [[TMP1:%.*]] = or i8 [[V1:%.*]], 1
22 ; SSE-NEXT: [[TMP2:%.*]] = zext i8 [[TMP0]] to i64
23 ; SSE-NEXT: [[TMP_4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
24 ; SSE-NEXT: [[TMP3:%.*]] = zext i8 [[TMP1]] to i64
25 ; SSE-NEXT: [[TMP_5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
26 ; SSE-NEXT: [[TMP_6:%.*]] = load i8, ptr [[TMP_4]], align 1
27 ; SSE-NEXT: [[TMP_7:%.*]] = load i8, ptr [[TMP_5]], align 1
28 ; SSE-NEXT: [[TMP_8:%.*]] = add i8 [[TMP_6]], [[TMP_7]]
29 ; SSE-NEXT: ret i8 [[TMP_8]]
31 ; AVX-LABEL: @PR31243_zext(
33 ; AVX-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
34 ; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
35 ; AVX-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
36 ; AVX-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i64 0
37 ; AVX-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
38 ; AVX-NEXT: [[TMP_4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP4]]
39 ; AVX-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i64 1
40 ; AVX-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
41 ; AVX-NEXT: [[TMP_5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP6]]
42 ; AVX-NEXT: [[TMP_6:%.*]] = load i8, ptr [[TMP_4]], align 1
43 ; AVX-NEXT: [[TMP_7:%.*]] = load i8, ptr [[TMP_5]], align 1
44 ; AVX-NEXT: [[TMP_8:%.*]] = add i8 [[TMP_6]], [[TMP_7]]
45 ; AVX-NEXT: ret i8 [[TMP_8]]
48 %tmp_0 = zext i8 %v0 to i32
49 %tmp_1 = zext i8 %v1 to i32
50 %tmp_2 = or i32 %tmp_0, 1
51 %tmp_3 = or i32 %tmp_1, 1
52 %tmp_4 = getelementptr inbounds i8, ptr %ptr, i32 %tmp_2
53 %tmp_5 = getelementptr inbounds i8, ptr %ptr, i32 %tmp_3
54 %tmp_6 = load i8, ptr %tmp_4
55 %tmp_7 = load i8, ptr %tmp_5
56 %tmp_8 = add i8 %tmp_6, %tmp_7
60 ; When computing minimum sizes, if we cannot prove the sign bit is zero, we
61 ; have to include one extra bit for signedness since we will sign-extend the roots.
64 ; FIXME: This test is suboptimal since the computation can be performed in i8.
65 ; In general, we need to add an extra bit to the maximum bit width only
66 ; if we can't prove that the upper bit of the original type is equal to
67 ; the upper bit of the proposed smaller type. If these two bits are the
68 ; same (either zero or one) we know that sign-extending from the smaller
69 ; type will result in the same value. Since we don't yet perform this
70 ; optimization, we make the proposed smaller type (i8) larger (i16) to ensure correctness.
73 define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, ptr %ptr) {
74 ; SSE-LABEL: @PR31243_sext(
76 ; SSE-NEXT: [[TMP0:%.*]] = or i8 [[V0:%.*]], 1
77 ; SSE-NEXT: [[TMP1:%.*]] = or i8 [[V1:%.*]], 1
78 ; SSE-NEXT: [[TMP2:%.*]] = sext i8 [[TMP0]] to i64
79 ; SSE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP2]]
80 ; SSE-NEXT: [[TMP3:%.*]] = sext i8 [[TMP1]] to i64
81 ; SSE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP3]]
82 ; SSE-NEXT: [[TMP6:%.*]] = load i8, ptr [[TMP4]], align 1
83 ; SSE-NEXT: [[TMP7:%.*]] = load i8, ptr [[TMP5]], align 1
84 ; SSE-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
85 ; SSE-NEXT: ret i8 [[TMP8]]
87 ; AVX-LABEL: @PR31243_sext(
89 ; AVX-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> poison, i8 [[V0:%.*]], i64 0
90 ; AVX-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i64 1
91 ; AVX-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
92 ; AVX-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i16>
93 ; AVX-NEXT: [[TMP4:%.*]] = extractelement <2 x i16> [[TMP3]], i64 0
94 ; AVX-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i64
95 ; AVX-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[PTR:%.*]], i64 [[TMP5]]
96 ; AVX-NEXT: [[TMP6:%.*]] = extractelement <2 x i16> [[TMP3]], i64 1
97 ; AVX-NEXT: [[TMP7:%.*]] = sext i16 [[TMP6]] to i64
98 ; AVX-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[PTR]], i64 [[TMP7]]
99 ; AVX-NEXT: [[TMP6:%.*]] = load i8, ptr [[TMP4]], align 1
100 ; AVX-NEXT: [[TMP7:%.*]] = load i8, ptr [[TMP5]], align 1
101 ; AVX-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
102 ; AVX-NEXT: ret i8 [[TMP8]]
105 %tmp0 = sext i8 %v0 to i32
106 %tmp1 = sext i8 %v1 to i32
107 %tmp2 = or i32 %tmp0, 1
108 %tmp3 = or i32 %tmp1, 1
109 %tmp4 = getelementptr inbounds i8, ptr %ptr, i32 %tmp2
110 %tmp5 = getelementptr inbounds i8, ptr %ptr, i32 %tmp3
111 %tmp6 = load i8, ptr %tmp4
112 %tmp7 = load i8, ptr %tmp5
113 %tmp8 = add i8 %tmp6, %tmp7