; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -slp-vectorizer -mattr=+sse2 -S | FileCheck %s --check-prefix=SSE
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -slp-vectorizer -mattr=+avx -S | FileCheck %s --check-prefix=AVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux-gnu -slp-vectorizer -mattr=+avx2 -S | FileCheck %s --check-prefix=AVX

%class.1 = type { %class.2 }
%class.2 = type { %"class.3" }
%"class.3" = type { %"struct.1", i64 }
%"struct.1" = type { [8 x i64] }

$_ZN1C10SwitchModeEv = comdat any
; Function Attrs: uwtable
define void @_ZN1C10SwitchModeEv() local_unnamed_addr #0 comdat align 2 {
; SSE-LABEL: @_ZN1C10SwitchModeEv(
; SSE-NEXT:  for.body.lr.ph.i:
; SSE-NEXT:    [[OR_1:%.*]] = or i64 undef, 1
; SSE-NEXT:    store i64 [[OR_1]], i64* undef, align 8
; SSE-NEXT:    [[FOO_1:%.*]] = getelementptr inbounds [[CLASS_1:%.*]], %class.1* undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 0
; SSE-NEXT:    [[FOO_3:%.*]] = load i64, i64* [[FOO_1]], align 8
; SSE-NEXT:    [[FOO_2:%.*]] = getelementptr inbounds [[CLASS_1]], %class.1* undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
; SSE-NEXT:    [[FOO_4:%.*]] = load i64, i64* [[FOO_2]], align 8
; SSE-NEXT:    [[BAR5:%.*]] = load i64, i64* undef, align 8
; SSE-NEXT:    [[AND_2:%.*]] = and i64 [[OR_1]], [[FOO_3]]
; SSE-NEXT:    [[AND_1:%.*]] = and i64 [[BAR5]], [[FOO_4]]
; SSE-NEXT:    [[BAR3:%.*]] = getelementptr inbounds [[CLASS_2:%.*]], %class.2* undef, i64 0, i32 0, i32 0, i32 0, i64 0
; SSE-NEXT:    store i64 [[AND_2]], i64* [[BAR3]], align 8
; SSE-NEXT:    [[BAR4:%.*]] = getelementptr inbounds [[CLASS_2]], %class.2* undef, i64 0, i32 0, i32 0, i32 0, i64 1
; SSE-NEXT:    store i64 [[AND_1]], i64* [[BAR4]], align 8
; SSE-NEXT:    ret void
;
; AVX-LABEL: @_ZN1C10SwitchModeEv(
; AVX-NEXT:  for.body.lr.ph.i:
; AVX-NEXT:    [[OR_1:%.*]] = or i64 undef, 1
; AVX-NEXT:    store i64 [[OR_1]], i64* undef, align 8
; AVX-NEXT:    [[FOO_1:%.*]] = getelementptr inbounds [[CLASS_1:%.*]], %class.1* undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 0
; AVX-NEXT:    [[FOO_2:%.*]] = getelementptr inbounds [[CLASS_1]], %class.1* undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
; AVX-NEXT:    [[TMP0:%.*]] = bitcast i64* [[FOO_1]] to <2 x i64>*
; AVX-NEXT:    [[TMP1:%.*]] = load <2 x i64>, <2 x i64>* [[TMP0]], align 8
; AVX-NEXT:    [[BAR5:%.*]] = load i64, i64* undef, align 8
; AVX-NEXT:    [[TMP2:%.*]] = insertelement <2 x i64> poison, i64 [[OR_1]], i32 0
; AVX-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> [[TMP2]], i64 [[BAR5]], i32 1
; AVX-NEXT:    [[TMP4:%.*]] = and <2 x i64> [[TMP3]], [[TMP1]]
; AVX-NEXT:    [[BAR3:%.*]] = getelementptr inbounds [[CLASS_2:%.*]], %class.2* undef, i64 0, i32 0, i32 0, i32 0, i64 0
; AVX-NEXT:    [[BAR4:%.*]] = getelementptr inbounds [[CLASS_2]], %class.2* undef, i64 0, i32 0, i32 0, i32 0, i64 1
; AVX-NEXT:    [[TMP5:%.*]] = bitcast i64* [[BAR3]] to <2 x i64>*
; AVX-NEXT:    store <2 x i64> [[TMP4]], <2 x i64>* [[TMP5]], align 8
; AVX-NEXT:    ret void
;
for.body.lr.ph.i:
  %or.1 = or i64 undef, 1
  store i64 %or.1, i64* undef, align 8
  %foo.1 = getelementptr inbounds %class.1, %class.1* undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 0
  %foo.3 = load i64, i64* %foo.1, align 8
  %foo.2 = getelementptr inbounds %class.1, %class.1* undef, i64 0, i32 0, i32 0, i32 0, i32 0, i64 1
  %foo.4 = load i64, i64* %foo.2, align 8
  %bar5 = load i64, i64* undef, align 8
  %and.2 = and i64 %or.1, %foo.3
  %and.1 = and i64 %bar5, %foo.4
  %bar3 = getelementptr inbounds %class.2, %class.2* undef, i64 0, i32 0, i32 0, i32 0, i64 0
  store i64 %and.2, i64* %bar3, align 8
  %bar4 = getelementptr inbounds %class.2, %class.2* undef, i64 0, i32 0, i32 0, i32 0, i64 1
  store i64 %and.1, i64* %bar4, align 8
  ret void
}
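
; Reproducer for PR35497. With AVX the shl/and and lshr/add chains are
; vectorized into two <2 x i64> chains, connected through an extractelement
; of the first vectorized add; with SSE2 everything stays scalar.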
; Function Attrs: norecurse nounwind uwtable
define void @pr35497() local_unnamed_addr #0 {
; SSE-LABEL: @pr35497(
; SSE-NEXT:  entry:
; SSE-NEXT:    [[TMP0:%.*]] = load i64, i64* undef, align 1
; SSE-NEXT:    [[AND:%.*]] = shl i64 [[TMP0]], 2
; SSE-NEXT:    [[SHL:%.*]] = and i64 [[AND]], 20
; SSE-NEXT:    [[ADD:%.*]] = add i64 undef, undef
; SSE-NEXT:    store i64 [[ADD]], i64* undef, align 1
; SSE-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 5
; SSE-NEXT:    [[AND_1:%.*]] = shl i64 undef, 2
; SSE-NEXT:    [[SHL_1:%.*]] = and i64 [[AND_1]], 20
; SSE-NEXT:    [[SHR_1:%.*]] = lshr i64 undef, 6
; SSE-NEXT:    [[ADD_1:%.*]] = add nuw nsw i64 [[SHL]], [[SHR_1]]
; SSE-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 4
; SSE-NEXT:    [[SHR_2:%.*]] = lshr i64 undef, 6
; SSE-NEXT:    [[ADD_2:%.*]] = add nuw nsw i64 [[SHL_1]], [[SHR_2]]
; SSE-NEXT:    [[AND_4:%.*]] = shl i64 [[ADD]], 2
; SSE-NEXT:    [[SHL_4:%.*]] = and i64 [[AND_4]], 20
; SSE-NEXT:    [[ARRAYIDX2_5:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 1
; SSE-NEXT:    store i64 [[ADD_1]], i64* [[ARRAYIDX2_5]], align 1
; SSE-NEXT:    [[AND_5:%.*]] = shl nuw nsw i64 [[ADD_1]], 2
; SSE-NEXT:    [[SHL_5:%.*]] = and i64 [[AND_5]], 20
; SSE-NEXT:    [[SHR_5:%.*]] = lshr i64 [[ADD_1]], 6
; SSE-NEXT:    [[ADD_5:%.*]] = add nuw nsw i64 [[SHL_4]], [[SHR_5]]
; SSE-NEXT:    store i64 [[ADD_5]], i64* [[ARRAYIDX2_1]], align 1
; SSE-NEXT:    [[ARRAYIDX2_6:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 0
; SSE-NEXT:    store i64 [[ADD_2]], i64* [[ARRAYIDX2_6]], align 1
; SSE-NEXT:    [[SHR_6:%.*]] = lshr i64 [[ADD_2]], 6
; SSE-NEXT:    [[ADD_6:%.*]] = add nuw nsw i64 [[SHL_5]], [[SHR_6]]
; SSE-NEXT:    store i64 [[ADD_6]], i64* [[ARRAYIDX2_2]], align 1
; SSE-NEXT:    ret void
;
; AVX-LABEL: @pr35497(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[TMP0:%.*]] = load i64, i64* undef, align 1
; AVX-NEXT:    [[ADD:%.*]] = add i64 undef, undef
; AVX-NEXT:    store i64 [[ADD]], i64* undef, align 1
; AVX-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 5
; AVX-NEXT:    [[TMP1:%.*]] = insertelement <2 x i64> <i64 undef, i64 poison>, i64 [[TMP0]], i32 1
; AVX-NEXT:    [[TMP2:%.*]] = shl <2 x i64> [[TMP1]], <i64 2, i64 2>
; AVX-NEXT:    [[TMP3:%.*]] = and <2 x i64> [[TMP2]], <i64 20, i64 20>
; AVX-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 4
; AVX-NEXT:    [[TMP4:%.*]] = add nuw nsw <2 x i64> [[TMP3]], zeroinitializer
; AVX-NEXT:    [[ARRAYIDX2_5:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 1
; AVX-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[TMP4]], i32 1
; AVX-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i32 0
; AVX-NEXT:    [[TMP7:%.*]] = insertelement <2 x i64> [[TMP6]], i64 [[ADD]], i32 1
; AVX-NEXT:    [[TMP8:%.*]] = shl <2 x i64> [[TMP7]], <i64 2, i64 2>
; AVX-NEXT:    [[TMP9:%.*]] = and <2 x i64> [[TMP8]], <i64 20, i64 20>
; AVX-NEXT:    [[ARRAYIDX2_6:%.*]] = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 0
; AVX-NEXT:    [[TMP10:%.*]] = bitcast i64* [[ARRAYIDX2_6]] to <2 x i64>*
; AVX-NEXT:    store <2 x i64> [[TMP4]], <2 x i64>* [[TMP10]], align 1
; AVX-NEXT:    [[TMP11:%.*]] = lshr <2 x i64> [[TMP4]], <i64 6, i64 6>
; AVX-NEXT:    [[TMP12:%.*]] = add nuw nsw <2 x i64> [[TMP9]], [[TMP11]]
; AVX-NEXT:    [[TMP13:%.*]] = bitcast i64* [[ARRAYIDX2_2]] to <2 x i64>*
; AVX-NEXT:    store <2 x i64> [[TMP12]], <2 x i64>* [[TMP13]], align 1
; AVX-NEXT:    ret void
;
entry:
  %0 = load i64, i64* undef, align 1
  %and = shl i64 %0, 2
  %shl = and i64 %and, 20
  %add = add i64 undef, undef
  store i64 %add, i64* undef, align 1
  %arrayidx2.1 = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 5
  %and.1 = shl i64 undef, 2
  %shl.1 = and i64 %and.1, 20
  %shr.1 = lshr i64 undef, 6
  %add.1 = add nuw nsw i64 %shl, %shr.1
  %arrayidx2.2 = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 4
  %shr.2 = lshr i64 undef, 6
  %add.2 = add nuw nsw i64 %shl.1, %shr.2
  %and.4 = shl i64 %add, 2
  %shl.4 = and i64 %and.4, 20
  %arrayidx2.5 = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 1
  store i64 %add.1, i64* %arrayidx2.5, align 1
  %and.5 = shl nuw nsw i64 %add.1, 2
  %shl.5 = and i64 %and.5, 20
  %shr.5 = lshr i64 %add.1, 6
  %add.5 = add nuw nsw i64 %shl.4, %shr.5
  store i64 %add.5, i64* %arrayidx2.1, align 1
  %arrayidx2.6 = getelementptr inbounds [0 x i64], [0 x i64]* undef, i64 0, i64 0
  store i64 %add.2, i64* %arrayidx2.6, align 1
  %shr.6 = lshr i64 %add.2, 6
  %add.6 = add nuw nsw i64 %shl.5, %shr.6
  store i64 %add.6, i64* %arrayidx2.2, align 1
  ret void
}