# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+sse2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOT_AVX2 --check-prefix=SSE2
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=NOT_AVX2 --check-prefix=AVX1
# RUN: llc -mtriple=x86_64-linux-gnu -mattr=+avx2 -run-pass=legalizer %s -o - | FileCheck %s --check-prefix=ALL --check-prefix=AVX2
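
# These tests exercise legalization of G_ADD on 256-bit vectors. Without
# AVX2 (SSE2/AVX1), 256-bit integer adds are not legal, so the legalizer is
# expected to split each operand into two 128-bit halves with
# G_UNMERGE_VALUES, add the halves, and reassemble the result with
# G_CONCAT_VECTORS. With AVX2 the 256-bit G_ADD is legal and passes through
# unchanged.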
--- |
  define void @test_add_v32i8() {
    %ret = add <32 x i8> undef, undef
    ret void
  }

  define void @test_add_v16i16() {
    %ret = add <16 x i16> undef, undef
    ret void
  }

  define void @test_add_v8i32() {
    %ret = add <8 x i32> undef, undef
    ret void
  }

  define void @test_add_v4i64() {
    %ret = add <4 x i64> undef, undef
    ret void
  }
...
---
name:            test_add_v32i8
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v32i8
    ; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF]](<32 x s8>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[DEF1]](<32 x s8>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_CONCAT_VECTORS [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
    ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
    ; ALL: RET 0
    %0(<32 x s8>) = IMPLICIT_DEF
    %1(<32 x s8>) = IMPLICIT_DEF
    %2(<32 x s8>) = G_ADD %0, %1
    $ymm0 = COPY %2(<32 x s8>)
    RET 0

...
---
name:            test_add_v16i16
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v16i16
    ; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; AVX1: [[UV:%[0-9]+]]:_(<8 x s16>), [[UV1:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF]](<16 x s16>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<8 x s16>), [[UV3:%[0-9]+]]:_(<8 x s16>) = G_UNMERGE_VALUES [[DEF1]](<16 x s16>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
    ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
    ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
    ; ALL: RET 0
    %0(<16 x s16>) = IMPLICIT_DEF
    %1(<16 x s16>) = IMPLICIT_DEF
    %2(<16 x s16>) = G_ADD %0, %1
    $ymm0 = COPY %2(<16 x s16>)
    RET 0

...
---
name:            test_add_v8i32
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v8i32
    ; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>)
    ; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
    ; AVX1: $ymm0 = COPY [[MV]](<8 x s32>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>)
    ; ALL: RET 0
    %0(<8 x s32>) = IMPLICIT_DEF
    %1(<8 x s32>) = IMPLICIT_DEF
    %2(<8 x s32>) = G_ADD %0, %1
    $ymm0 = COPY %2(<8 x s32>)
    RET 0

...
---
name:            test_add_v4i64
alignment:       4
legalized:       false
regBankSelected: false
registers:
  - { id: 0, class: _ }
  - { id: 1, class: _ }
  - { id: 2, class: _ }
body:             |
  bb.1 (%ir-block.0):
    liveins: $ymm0, $ymm1

    ; ALL-LABEL: name: test_add_v4i64
    ; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; ALL: [[DEF1:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
    ; SSE2: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; SSE2: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; SSE2: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; SSE2: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; SSE2: [[MV:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; AVX1: [[UV:%[0-9]+]]:_(<2 x s64>), [[UV1:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF]](<4 x s64>)
    ; AVX1: [[UV2:%[0-9]+]]:_(<2 x s64>), [[UV3:%[0-9]+]]:_(<2 x s64>) = G_UNMERGE_VALUES [[DEF1]](<4 x s64>)
    ; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
    ; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
    ; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
    ; SSE2: $ymm0 = COPY [[MV]](<4 x s64>)
    ; AVX1: $ymm0 = COPY [[MV]](<4 x s64>)
    ; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
    ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>)
    ; ALL: RET 0
    %0(<4 x s64>) = IMPLICIT_DEF
    %1(<4 x s64>) = IMPLICIT_DEF
    %2(<4 x s64>) = G_ADD %0, %1
    $ymm0 = COPY %2(<4 x s64>)
    RET 0

...