1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt -S -slp-threshold=-6 -slp-vectorizer -instcombine < %s | FileCheck %s
4 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
5 target triple = "x86_64-unknown-linux-gnu"
7 ; These tests ensure that we do not regress due to PR31243. Note that we set
8 ; the SLP threshold to force vectorization even when not profitable.
10 ; When computing minimum sizes, if we can prove the sign bit is zero, we can
11 ; zero-extend the roots back to their original sizes.
; Scalar input: %v0/%v1 are zero-extended to i32, or'd with 1, and used as gep
; indices.  The autogenerated checks below pin the expected output after SLP +
; instcombine: because the zext roots have a provably-zero sign bit, the
; computation is shrunk back to <2 x i8> (the `or <2 x i8>` line), and each
; extracted lane is zero-extended to i64 only at its gep use.
; NOTE(review): %v2/%v3 appear unused in the visible body — only the %v0/%v1
; chain is vectorized.  The FileCheck variables TMPE4/TMP5/TMP6 are oddly
; named and reused, but FileCheck permits redefinition and every later use
; refers to the most recent definition, so the checks are self-consistent;
; regenerating with update_test_checks.py would normalize the names.
13 define i8 @PR31243_zext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8* %ptr) {
14 ; CHECK-LABEL: @PR31243_zext(
16 ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> undef, i8 [[V0:%.*]], i32 0
17 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i32 1
18 ; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
19 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x i8> [[TMP2]], i32 0
20 ; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i64
21 ; CHECK-NEXT: [[TMPE4:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[TMP4]]
22 ; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x i8> [[TMP2]], i32 1
23 ; CHECK-NEXT: [[TMP6:%.*]] = zext i8 [[TMP5]] to i64
24 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[TMP6]]
25 ; CHECK-NEXT: [[TMP6:%.*]] = load i8, i8* [[TMPE4]], align 1
26 ; CHECK-NEXT: [[TMP7:%.*]] = load i8, i8* [[TMP5]], align 1
27 ; CHECK-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
28 ; CHECK-NEXT: ret i8 [[TMP8]]
; Scalar IR that the pass pipeline transforms into the vector form above.
31 %tmp0 = zext i8 %v0 to i32
32 %tmp1 = zext i8 %v1 to i32
33 %tmp2 = or i32 %tmp0, 1
34 %tmp3 = or i32 %tmp1, 1
35 %tmp4 = getelementptr inbounds i8, i8* %ptr, i32 %tmp2
36 %tmp5 = getelementptr inbounds i8, i8* %ptr, i32 %tmp3
37 %tmp6 = load i8, i8* %tmp4
38 %tmp7 = load i8, i8* %tmp5
39 %tmp8 = add i8 %tmp6, %tmp7
43 ; When computing minimum sizes, if we cannot prove the sign bit is zero, we
44 ; have to include one extra bit for signedness since we will sign-extend the
47 ; FIXME: This test is suboptimal since the computation can be performed in i8.
48 ; In general, we need to add an extra bit to the maximum bit width only
49 ; if we can't prove that the upper bit of the original type is equal to
50 ; the upper bit of the proposed smaller type. If these two bits are the
51 ; same (either zero or one) we know that sign-extending from the smaller
52 ; type will result in the same value. Since we don't yet perform this
53 ; optimization, we make the proposed smaller type (i8) larger (i16) to
; Sign-extended variant: identical shape to @PR31243_zext, but the roots are
; sext, so the sign bit cannot be proven zero.  The checks show the expected
; suboptimal result described in the FIXME above: the <2 x i8> `or` is widened
; to <2 x i16> with a vector sext (one extra bit of headroom for signedness)
; before the per-lane sext-to-i64 gep indices.
; NOTE(review): %v2/%v3 appear unused in the visible body.  The FileCheck
; variables TMP4/TMP5 are redefined partway through (FileCheck allows this,
; and each later use matches the most recent definition), so the checks are
; self-consistent; regenerating with update_test_checks.py would renumber them.
56 define i8 @PR31243_sext(i8 %v0, i8 %v1, i8 %v2, i8 %v3, i8* %ptr) {
57 ; CHECK-LABEL: @PR31243_sext(
59 ; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x i8> undef, i8 [[V0:%.*]], i32 0
60 ; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x i8> [[TMP0]], i8 [[V1:%.*]], i32 1
61 ; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], <i8 1, i8 1>
62 ; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i8> [[TMP2]] to <2 x i16>
63 ; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x i16> [[TMP3]], i32 0
64 ; CHECK-NEXT: [[TMP5:%.*]] = sext i16 [[TMP4]] to i64
65 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, i8* [[PTR:%.*]], i64 [[TMP5]]
66 ; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i16> [[TMP3]], i32 1
67 ; CHECK-NEXT: [[TMP7:%.*]] = sext i16 [[TMP6]] to i64
68 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[TMP7]]
69 ; CHECK-NEXT: [[TMP6:%.*]] = load i8, i8* [[TMP4]], align 1
70 ; CHECK-NEXT: [[TMP7:%.*]] = load i8, i8* [[TMP5]], align 1
71 ; CHECK-NEXT: [[TMP8:%.*]] = add i8 [[TMP6]], [[TMP7]]
72 ; CHECK-NEXT: ret i8 [[TMP8]]
; Scalar IR that the pass pipeline transforms into the vector form above.
75 %tmp0 = sext i8 %v0 to i32
76 %tmp1 = sext i8 %v1 to i32
77 %tmp2 = or i32 %tmp0, 1
78 %tmp3 = or i32 %tmp1, 1
79 %tmp4 = getelementptr inbounds i8, i8* %ptr, i32 %tmp2
80 %tmp5 = getelementptr inbounds i8, i8* %ptr, i32 %tmp3
81 %tmp6 = load i8, i8* %tmp4
82 %tmp7 = load i8, i8* %tmp5
83 %tmp8 = add i8 %tmp6, %tmp7