; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -basicaa -slp-vectorizer -dce -S | FileCheck %s --check-prefixes=CHECK,SSE42
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -basicaa -slp-vectorizer -dce -S | FileCheck %s --check-prefixes=CHECK,AVX

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
; int test_sext_4i8_to_4i32(int * restrict A, char * restrict B) {
;   A[0] = B[0];
;   A[1] = B[1];
;   A[2] = B[2];
;   A[3] = B[3];
; }

define i32 @test_sext_4i8_to_4i32(i32* noalias nocapture %A, i8* noalias nocapture %B) {
; CHECK-LABEL: @test_sext_4i8_to_4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[B:%.*]] to <4 x i8>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i8, i8* %B, align 1
  %conv = sext i8 %0 to i32
  store i32 %conv, i32* %A, align 4
  %arrayidx2 = getelementptr inbounds i8, i8* %B, i64 1
  %1 = load i8, i8* %arrayidx2, align 1
  %conv3 = sext i8 %1 to i32
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
  store i32 %conv3, i32* %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i8, i8* %B, i64 2
  %2 = load i8, i8* %arrayidx5, align 1
  %conv6 = sext i8 %2 to i32
  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 2
  store i32 %conv6, i32* %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i8, i8* %B, i64 3
  %3 = load i8, i8* %arrayidx8, align 1
  %conv9 = sext i8 %3 to i32
  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 3
  store i32 %conv9, i32* %arrayidx10, align 4
  ret i32 undef
}

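; Reference C source for the next test, reconstructed from the IR below
; (the zext implies an unsigned source type; this sketch is not from the
; original file):
; int test_zext_4i16_to_4i32(int * restrict A, unsigned short * restrict B) {
;   A[0] = B[0];
;   A[1] = B[1];
;   A[2] = B[2];
;   A[3] = B[3];
; }
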
define i32 @test_zext_4i16_to_4i32(i32* noalias nocapture %A, i16* noalias nocapture %B) {
; CHECK-LABEL: @test_zext_4i16_to_4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i16* [[B:%.*]] to <4 x i16>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i16, i16* %B, align 1
  %conv = zext i16 %0 to i32
  store i32 %conv, i32* %A, align 4
  %arrayidx2 = getelementptr inbounds i16, i16* %B, i64 1
  %1 = load i16, i16* %arrayidx2, align 1
  %conv3 = zext i16 %1 to i32
  %arrayidx4 = getelementptr inbounds i32, i32* %A, i64 1
  store i32 %conv3, i32* %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i16, i16* %B, i64 2
  %2 = load i16, i16* %arrayidx5, align 1
  %conv6 = zext i16 %2 to i32
  %arrayidx7 = getelementptr inbounds i32, i32* %A, i64 2
  store i32 %conv6, i32* %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i16, i16* %B, i64 3
  %3 = load i16, i16* %arrayidx8, align 1
  %conv9 = zext i16 %3 to i32
  %arrayidx10 = getelementptr inbounds i32, i32* %A, i64 3
  store i32 %conv9, i32* %arrayidx10, align 4
  ret i32 undef
}

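; Reference C source for the next test, reconstructed from the IR below
; (sext from i16 to i64 suggests short to long; this sketch is not from the
; original file):
; long test_sext_4i16_to_4i64(long * restrict A, short * restrict B) {
;   A[0] = B[0];
;   A[1] = B[1];
;   A[2] = B[2];
;   A[3] = B[3];
; }
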
define i64 @test_sext_4i16_to_4i64(i64* noalias nocapture %A, i16* noalias nocapture %B) {
; SSE42-LABEL: @test_sext_4i16_to_4i64(
; SSE42-NEXT:  entry:
; SSE42-NEXT:    [[TMP0:%.*]] = bitcast i16* [[B:%.*]] to <2 x i16>*
; SSE42-NEXT:    [[TMP1:%.*]] = load <2 x i16>, <2 x i16>* [[TMP0]], align 1
; SSE42-NEXT:    [[TMP2:%.*]] = sext <2 x i16> [[TMP1]] to <2 x i64>
; SSE42-NEXT:    [[TMP3:%.*]] = bitcast i64* [[A:%.*]] to <2 x i64>*
; SSE42-NEXT:    store <2 x i64> [[TMP2]], <2 x i64>* [[TMP3]], align 4
; SSE42-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 2
; SSE42-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 2
; SSE42-NEXT:    [[TMP4:%.*]] = bitcast i16* [[ARRAYIDX5]] to <2 x i16>*
; SSE42-NEXT:    [[TMP5:%.*]] = load <2 x i16>, <2 x i16>* [[TMP4]], align 1
; SSE42-NEXT:    [[TMP6:%.*]] = sext <2 x i16> [[TMP5]] to <2 x i64>
; SSE42-NEXT:    [[TMP7:%.*]] = bitcast i64* [[ARRAYIDX7]] to <2 x i64>*
; SSE42-NEXT:    store <2 x i64> [[TMP6]], <2 x i64>* [[TMP7]], align 4
; SSE42-NEXT:    ret i64 undef
;
; AVX-LABEL: @test_sext_4i16_to_4i64(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[TMP0:%.*]] = bitcast i16* [[B:%.*]] to <4 x i16>*
; AVX-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 1
; AVX-NEXT:    [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i64>
; AVX-NEXT:    [[TMP3:%.*]] = bitcast i64* [[A:%.*]] to <4 x i64>*
; AVX-NEXT:    store <4 x i64> [[TMP2]], <4 x i64>* [[TMP3]], align 4
; AVX-NEXT:    ret i64 undef
;
entry:
  %0 = load i16, i16* %B, align 1
  %conv = sext i16 %0 to i64
  store i64 %conv, i64* %A, align 4
  %arrayidx2 = getelementptr inbounds i16, i16* %B, i64 1
  %1 = load i16, i16* %arrayidx2, align 1
  %conv3 = sext i16 %1 to i64
  %arrayidx4 = getelementptr inbounds i64, i64* %A, i64 1
  store i64 %conv3, i64* %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i16, i16* %B, i64 2
  %2 = load i16, i16* %arrayidx5, align 1
  %conv6 = sext i16 %2 to i64
  %arrayidx7 = getelementptr inbounds i64, i64* %A, i64 2
  store i64 %conv6, i64* %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i16, i16* %B, i64 3
  %3 = load i16, i16* %arrayidx8, align 1
  %conv9 = sext i16 %3 to i64
  %arrayidx10 = getelementptr inbounds i64, i64* %A, i64 3
  store i64 %conv9, i64* %arrayidx10, align 4
  ret i64 undef
}