# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=SSE2
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=AVX2
# TODO: add tests for additional configurations once their legalization is supported
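#
# A 256-bit vector G_ADD is not legal without AVX2: the SSE2 and AVX1 runs below
# narrow it into two 128-bit G_ADDs via G_UNMERGE_VALUES/G_CONCAT_VECTORS, while
# the AVX2 run keeps the single 256-bit G_ADD.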
--- |
  define void @test_add_v32i8() {
    %ret = add <32 x i8> undef, undef
    ret void
  }
  define void @test_add_v16i16() {
    %ret = add <16 x i16> undef, undef
    ret void
  }
  define void @test_add_v8i32() {
    %ret = add <8 x i32> undef, undef
    ret void
  }
  define void @test_add_v4i64() {
    %ret = add <4 x i64> undef, undef
    ret void
  }
...
---
name:            test_add_v32i8
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v32i8
    ; SSE2: liveins: $ymm0, $ymm1
    ; SSE2-NEXT: {{  $}}
    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
    ; SSE2-NEXT: RET 0
    ;
    ; AVX1-LABEL: name: test_add_v32i8
    ; AVX1: liveins: $ymm0, $ymm1
    ; AVX1-NEXT: {{  $}}
    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<32 x s8>)
    ; AVX1-NEXT: RET 0
    ;
    ; AVX2-LABEL: name: test_add_v32i8
    ; AVX2: liveins: $ymm0, $ymm1
    ; AVX2-NEXT: {{  $}}
    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; AVX2-NEXT: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_ADD %0, %1
    $ymm0 = COPY %2(<32 x s8>)
    RET 0
...
---
name:            test_add_v16i16
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v16i16
    ; SSE2: liveins: $ymm0, $ymm1
    ; SSE2-NEXT: {{  $}}
    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
    ; SSE2-NEXT: RET 0
    ;
    ; AVX1-LABEL: name: test_add_v16i16
    ; AVX1: liveins: $ymm0, $ymm1
    ; AVX1-NEXT: {{  $}}
    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<16 x s16>)
    ; AVX1-NEXT: RET 0
    ;
    ; AVX2-LABEL: name: test_add_v16i16
    ; AVX2: liveins: $ymm0, $ymm1
    ; AVX2-NEXT: {{  $}}
    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<16 x s16>)
    ; AVX2-NEXT: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_ADD %0, %1
    $ymm0 = COPY %2(<16 x s16>)
    RET 0
...
---
name:            test_add_v8i32
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v8i32
    ; SSE2: liveins: $ymm0, $ymm1
    ; SSE2-NEXT: {{  $}}
    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
    ; SSE2-NEXT: RET 0
    ;
    ; AVX1-LABEL: name: test_add_v8i32
    ; AVX1: liveins: $ymm0, $ymm1
    ; AVX1-NEXT: {{  $}}
    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<8 x s32>)
    ; AVX1-NEXT: RET 0
    ;
    ; AVX2-LABEL: name: test_add_v8i32
    ; AVX2: liveins: $ymm0, $ymm1
    ; AVX2-NEXT: {{  $}}
    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<8 x s32>)
    ; AVX2-NEXT: RET 0
    %0(<8 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = IMPLICIT_DEF
    %2(<8 x s32>) = G_ADD %0, %1
    $ymm0 = COPY %2(<8 x s32>)
    RET 0
...
---
name:            test_add_v4i64
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1
    ; SSE2-LABEL: name: test_add_v4i64
    ; SSE2: liveins: $ymm0, $ymm1
    ; SSE2-NEXT: {{  $}}
    ; SSE2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE2-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; SSE2-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; SSE2-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; SSE2-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; SSE2-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
    ; SSE2-NEXT: RET 0
    ;
    ; AVX1-LABEL: name: test_add_v4i64
    ; AVX1: liveins: $ymm0, $ymm1
    ; AVX1-NEXT: {{  $}}
    ; AVX1-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX1-NEXT: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; AVX1-NEXT: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; AVX1-NEXT: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; AVX1-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; AVX1-NEXT: $ymm0 = COPY [[CONCAT_VECTORS]](<4 x s64>)
    ; AVX1-NEXT: RET 0
    ;
    ; AVX2-LABEL: name: test_add_v4i64
    ; AVX2: liveins: $ymm0, $ymm1
    ; AVX2-NEXT: {{  $}}
    ; AVX2-NEXT: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; AVX2-NEXT: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2-NEXT: $ymm0 = COPY [[ADD]](<4 x s64>)
    ; AVX2-NEXT: RET 0
    %0(<4 x s64>) = IMPLICIT_DEF
    %1(<4 x s64>) = IMPLICIT_DEF
    %2(<4 x s64>) = G_ADD %0, %1