# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx512f,+avx512bw -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
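#
# Tests legalization of 512-bit vector G_ADD under three feature sets. Per
# the CHECK lines below: AVX1 splits each 512-bit add into four 128-bit
# pieces, AVX512F splits the 8-bit and 16-bit element cases into two 256-bit
# pieces (its 512-bit adds only cover 32- and 64-bit elements), and AVX512BW
# legalizes every case as a single full-width G_ADD.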
--- |
  define void @test_add_v64i8() {
    %ret = add <64 x i8> undef, undef
    ret void
  }

  define void @test_add_v32i16() {
    %ret = add <32 x i16> undef, undef
    ret void
  }

  define void @test_add_v16i32() {
    %ret = add <16 x i32> undef, undef
    ret void
  }

  define void @test_add_v8i64() {
    %ret = add <8 x i64> undef, undef
    ret void
  }

  define <64 x i8> @test_add_v64i8_2(<64 x i8> %arg1, <64 x i8> %arg2) #0 {
    %ret = add <64 x i8> %arg1, %arg2
    ret <64 x i8> %ret
  }

  ; Attribute body assumed; the #0 reference above requires a definition.
  attributes #0 = { "target-features"="+avx512bw" }
...
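# v64i8: AVX1 quarters the vector into <16 x s8> adds, AVX512F halves it
# into <32 x s8> adds, and AVX512BW keeps the <64 x s8> G_ADD intact.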
---
name:            test_add_v64i8
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v64i8
    ; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>), [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>), [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
    ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
    ; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
    ; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]]
    ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
    ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>)
    ; ALL: RET 0
    %0(<64 x s8>) = IMPLICIT_DEF
    %1(<64 x s8>) = IMPLICIT_DEF
    %2(<64 x s8>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
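# v32i16: same splitting pattern as v64i8, with <8 x s16> quarters on AVX1
# and <16 x s16> halves on AVX512F.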
---
name:            test_add_v32i16
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v32i16
    ; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>), [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<8 x s16>), [[UV5:%[0-9]+]]:_(<8 x s16>), [[UV6:%[0-9]+]]:_(<8 x s16>), [[UV7:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>)
    ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>)
    ; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
    ; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_CONCAT_VECTORS [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
    ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>)
    ; ALL: RET 0
    %0(<32 x s16>) = IMPLICIT_DEF
    %1(<32 x s16>) = IMPLICIT_DEF
    %2(<32 x s16>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
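# v16i32: only AVX1 needs to split; the AVX512F and AVX512BW results are
# identical single <16 x s32> adds.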
---
name:            test_add_v16i32
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v16i32
    ; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>), [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<16 x s32>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<4 x s32>), [[UV5:%[0-9]+]]:_(<4 x s32>), [[UV6:%[0-9]+]]:_(<4 x s32>), [[UV7:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<16 x s32>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>)
    ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<16 x s32>)
    ; ALL: RET 0
    %0(<16 x s32>) = IMPLICIT_DEF
    %1(<16 x s32>) = IMPLICIT_DEF
    %2(<16 x s32>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
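# v8i64: as with v16i32, only AVX1 splits (into <2 x s64> quarters); both
# AVX512 variants keep the full-width add.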
---
name:            test_add_v8i64
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $zmm0, $zmm1

    ; ALL-LABEL: name: test_add_v8i64
    ; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
    ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>), [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<8 x s64>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<2 x s64>), [[UV5:%[0-9]+]]:_(<2 x s64>), [[UV6:%[0-9]+]]:_(<2 x s64>), [[UV7:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<8 x s64>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>)
    ; AVX1: $zmm0 = COPY [[MV]](<8 x s64>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX512BW: $zmm0 = COPY [[ADD]](<8 x s64>)
    ; ALL: RET 0
    %0(<8 x s64>) = IMPLICIT_DEF
    %1(<8 x s64>) = IMPLICIT_DEF
    %2(<8 x s64>) = G_ADD %0, %1
    $zmm0 = COPY %2
    RET 0
...
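# v64i8 with real operands passed split across $ymm0-$ymm3: AVX512F adds the
# <32 x s8> halves directly, AVX1 further splits them into <16 x s8> pieces,
# and AVX512BW concatenates to <64 x s8>, adds once, and unmerges the result
# back into two $ymm registers.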
---
name:            test_add_v64i8_2
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
  - { id: 3, class: _ }
  - { id: 4, class: _ }
  - { id: 5, class: _ }
  - { id: 6, class: _ }
  - { id: 7, class: _ }
  - { id: 8, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1, $ymm2, $ymm3

    ; ALL-LABEL: name: test_add_v64i8_2
    ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
    ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
    ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
    ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>)
    ; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>)
    ; AVX1: [[UV6:%[0-9]+]]:_(<16 x s8>), [[UV7:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY3]](<32 x s8>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV4]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV5]]
    ; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
    ; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
    ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX1: $ymm1 = COPY [[MV1]](<32 x s8>)
    ; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]]
    ; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]]
    ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>)
    ; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
    ; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_CONCAT_VECTORS [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
    ; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[MV]], [[MV1]]
    ; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>)
    ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>)
    ; AVX512BW: $ymm1 = COPY [[UV1]](<32 x s8>)
    ; ALL: RET 0, implicit $ymm0, implicit $ymm1
    %2(<32 x s8>) = COPY $ymm0
    %3(<32 x s8>) = COPY $ymm1
    %4(<32 x s8>) = COPY $ymm2
    %5(<32 x s8>) = COPY $ymm3
    %0(<64 x s8>) = G_CONCAT_VECTORS %2(<32 x s8>), %3(<32 x s8>)
    %1(<64 x s8>) = G_CONCAT_VECTORS %4(<32 x s8>), %5(<32 x s8>)
    %6(<64 x s8>) = G_ADD %0, %1
    %7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>)
    $ymm0 = COPY %7(<32 x s8>)
    $ymm1 = COPY %8(<32 x s8>)
    RET 0, implicit $ymm0, implicit $ymm1
...