// NOTE(review): extraction residue from gitweb cleaned up below; provenance:
// llvm-project: clang/test/CodeGen/AArch64/neon-extract.c
// (blob e5699f813131fa456ca5e54219c0ceba1227f95a, from revert of
//  "[llvm] Improve llvm.objectsize computation by computing GEP, alloca and malloc parameters bounds")

// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_neon.h>
8 // CHECK-LABEL: define{{.*}} <8 x i8> @test_vext_s8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 {
9 // CHECK: [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
10 // CHECK: ret <8 x i8> [[VEXT]]
11 int8x8_t test_vext_s8(int8x8_t a, int8x8_t b) {
12 return vext_s8(a, b, 2);
15 // CHECK-LABEL: define{{.*}} <4 x i16> @test_vext_s16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 {
16 // CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
17 // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
18 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
19 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
20 // CHECK: [[VEXT:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
21 // CHECK: ret <4 x i16> [[VEXT]]
22 int16x4_t test_vext_s16(int16x4_t a, int16x4_t b) {
23 return vext_s16(a, b, 3);
26 // CHECK-LABEL: define{{.*}} <2 x i32> @test_vext_s32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 {
27 // CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
28 // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
29 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
30 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
31 // CHECK: [[VEXT:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> <i32 1, i32 2>
32 // CHECK: ret <2 x i32> [[VEXT]]
33 int32x2_t test_vext_s32(int32x2_t a, int32x2_t b) {
34 return vext_s32(a, b, 1);
37 // CHECK-LABEL: define{{.*}} <1 x i64> @test_vext_s64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 {
38 // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
39 // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
40 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
41 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
42 // CHECK: [[VEXT:%.*]] = shufflevector <1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i32> zeroinitializer
43 // CHECK: ret <1 x i64> [[VEXT]]
44 int64x1_t test_vext_s64(int64x1_t a, int64x1_t b) {
45 return vext_s64(a, b, 0);
48 // CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_s8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 {
49 // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
50 // CHECK: ret <16 x i8> [[VEXT]]
51 int8x16_t test_vextq_s8(int8x16_t a, int8x16_t b) {
52 return vextq_s8(a, b, 2);
55 // CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_s16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 {
56 // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
57 // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
58 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
59 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
60 // CHECK: [[VEXT:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
61 // CHECK: ret <8 x i16> [[VEXT]]
62 int16x8_t test_vextq_s16(int16x8_t a, int16x8_t b) {
63 return vextq_s16(a, b, 3);
66 // CHECK-LABEL: define{{.*}} <4 x i32> @test_vextq_s32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 {
67 // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
68 // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
69 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
70 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
71 // CHECK: [[VEXT:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> <i32 1, i32 2, i32 3, i32 4>
72 // CHECK: ret <4 x i32> [[VEXT]]
73 int32x4_t test_vextq_s32(int32x4_t a, int32x4_t b) {
74 return vextq_s32(a, b, 1);
77 // CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_s64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 {
78 // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
79 // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
80 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
81 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
82 // CHECK: [[VEXT:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i32> <i32 1, i32 2>
83 // CHECK: ret <2 x i64> [[VEXT]]
84 int64x2_t test_vextq_s64(int64x2_t a, int64x2_t b) {
85 return vextq_s64(a, b, 1);
88 // CHECK-LABEL: define{{.*}} <8 x i8> @test_vext_u8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 {
89 // CHECK: [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
90 // CHECK: ret <8 x i8> [[VEXT]]
91 uint8x8_t test_vext_u8(uint8x8_t a, uint8x8_t b) {
92 return vext_u8(a, b, 2);
95 // CHECK-LABEL: define{{.*}} <4 x i16> @test_vext_u16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 {
96 // CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
97 // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
98 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
99 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
100 // CHECK: [[VEXT:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
101 // CHECK: ret <4 x i16> [[VEXT]]
102 uint16x4_t test_vext_u16(uint16x4_t a, uint16x4_t b) {
103 return vext_u16(a, b, 3);
106 // CHECK-LABEL: define{{.*}} <2 x i32> @test_vext_u32(<2 x i32> noundef %a, <2 x i32> noundef %b) #0 {
107 // CHECK: [[TMP0:%.*]] = bitcast <2 x i32> %a to <8 x i8>
108 // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> %b to <8 x i8>
109 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x i32>
110 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
111 // CHECK: [[VEXT:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP3]], <2 x i32> <i32 1, i32 2>
112 // CHECK: ret <2 x i32> [[VEXT]]
113 uint32x2_t test_vext_u32(uint32x2_t a, uint32x2_t b) {
114 return vext_u32(a, b, 1);
117 // CHECK-LABEL: define{{.*}} <1 x i64> @test_vext_u64(<1 x i64> noundef %a, <1 x i64> noundef %b) #0 {
118 // CHECK: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
119 // CHECK: [[TMP1:%.*]] = bitcast <1 x i64> %b to <8 x i8>
120 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64>
121 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x i64>
122 // CHECK: [[VEXT:%.*]] = shufflevector <1 x i64> [[TMP2]], <1 x i64> [[TMP3]], <1 x i32> zeroinitializer
123 // CHECK: ret <1 x i64> [[VEXT]]
124 uint64x1_t test_vext_u64(uint64x1_t a, uint64x1_t b) {
125 return vext_u64(a, b, 0);
128 // CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_u8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 {
129 // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
130 // CHECK: ret <16 x i8> [[VEXT]]
131 uint8x16_t test_vextq_u8(uint8x16_t a, uint8x16_t b) {
132 return vextq_u8(a, b, 2);
135 // CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_u16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 {
136 // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
137 // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
138 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
139 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
140 // CHECK: [[VEXT:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
141 // CHECK: ret <8 x i16> [[VEXT]]
142 uint16x8_t test_vextq_u16(uint16x8_t a, uint16x8_t b) {
143 return vextq_u16(a, b, 3);
146 // CHECK-LABEL: define{{.*}} <4 x i32> @test_vextq_u32(<4 x i32> noundef %a, <4 x i32> noundef %b) #0 {
147 // CHECK: [[TMP0:%.*]] = bitcast <4 x i32> %a to <16 x i8>
148 // CHECK: [[TMP1:%.*]] = bitcast <4 x i32> %b to <16 x i8>
149 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x i32>
150 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
151 // CHECK: [[VEXT:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> [[TMP3]], <4 x i32> <i32 1, i32 2, i32 3, i32 4>
152 // CHECK: ret <4 x i32> [[VEXT]]
153 uint32x4_t test_vextq_u32(uint32x4_t a, uint32x4_t b) {
154 return vextq_u32(a, b, 1);
157 // CHECK-LABEL: define{{.*}} <2 x i64> @test_vextq_u64(<2 x i64> noundef %a, <2 x i64> noundef %b) #0 {
158 // CHECK: [[TMP0:%.*]] = bitcast <2 x i64> %a to <16 x i8>
159 // CHECK: [[TMP1:%.*]] = bitcast <2 x i64> %b to <16 x i8>
160 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x i64>
161 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
162 // CHECK: [[VEXT:%.*]] = shufflevector <2 x i64> [[TMP2]], <2 x i64> [[TMP3]], <2 x i32> <i32 1, i32 2>
163 // CHECK: ret <2 x i64> [[VEXT]]
164 uint64x2_t test_vextq_u64(uint64x2_t a, uint64x2_t b) {
165 return vextq_u64(a, b, 1);
168 // CHECK-LABEL: define{{.*}} <2 x float> @test_vext_f32(<2 x float> noundef %a, <2 x float> noundef %b) #0 {
169 // CHECK: [[TMP0:%.*]] = bitcast <2 x float> %a to <8 x i8>
170 // CHECK: [[TMP1:%.*]] = bitcast <2 x float> %b to <8 x i8>
171 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <2 x float>
172 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x float>
173 // CHECK: [[VEXT:%.*]] = shufflevector <2 x float> [[TMP2]], <2 x float> [[TMP3]], <2 x i32> <i32 1, i32 2>
174 // CHECK: ret <2 x float> [[VEXT]]
175 float32x2_t test_vext_f32(float32x2_t a, float32x2_t b) {
176 return vext_f32(a, b, 1);
179 // CHECK-LABEL: define{{.*}} <1 x double> @test_vext_f64(<1 x double> noundef %a, <1 x double> noundef %b) #0 {
180 // CHECK: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
181 // CHECK: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
182 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x double>
183 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <1 x double>
184 // CHECK: [[VEXT:%.*]] = shufflevector <1 x double> [[TMP2]], <1 x double> [[TMP3]], <1 x i32> zeroinitializer
185 // CHECK: ret <1 x double> [[VEXT]]
186 float64x1_t test_vext_f64(float64x1_t a, float64x1_t b) {
187 return vext_f64(a, b, 0);
190 // CHECK-LABEL: define{{.*}} <4 x float> @test_vextq_f32(<4 x float> noundef %a, <4 x float> noundef %b) #0 {
191 // CHECK: [[TMP0:%.*]] = bitcast <4 x float> %a to <16 x i8>
192 // CHECK: [[TMP1:%.*]] = bitcast <4 x float> %b to <16 x i8>
193 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <4 x float>
194 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x float>
195 // CHECK: [[VEXT:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP3]], <4 x i32> <i32 1, i32 2, i32 3, i32 4>
196 // CHECK: ret <4 x float> [[VEXT]]
197 float32x4_t test_vextq_f32(float32x4_t a, float32x4_t b) {
198 return vextq_f32(a, b, 1);
201 // CHECK-LABEL: define{{.*}} <2 x double> @test_vextq_f64(<2 x double> noundef %a, <2 x double> noundef %b) #0 {
202 // CHECK: [[TMP0:%.*]] = bitcast <2 x double> %a to <16 x i8>
203 // CHECK: [[TMP1:%.*]] = bitcast <2 x double> %b to <16 x i8>
204 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <2 x double>
205 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double>
206 // CHECK: [[VEXT:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP3]], <2 x i32> <i32 1, i32 2>
207 // CHECK: ret <2 x double> [[VEXT]]
208 float64x2_t test_vextq_f64(float64x2_t a, float64x2_t b) {
209 return vextq_f64(a, b, 1);
212 // CHECK-LABEL: define{{.*}} <8 x i8> @test_vext_p8(<8 x i8> noundef %a, <8 x i8> noundef %b) #0 {
213 // CHECK: [[VEXT:%.*]] = shufflevector <8 x i8> %a, <8 x i8> %b, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9>
214 // CHECK: ret <8 x i8> [[VEXT]]
215 poly8x8_t test_vext_p8(poly8x8_t a, poly8x8_t b) {
216 return vext_p8(a, b, 2);
219 // CHECK-LABEL: define{{.*}} <4 x i16> @test_vext_p16(<4 x i16> noundef %a, <4 x i16> noundef %b) #0 {
220 // CHECK: [[TMP0:%.*]] = bitcast <4 x i16> %a to <8 x i8>
221 // CHECK: [[TMP1:%.*]] = bitcast <4 x i16> %b to <8 x i8>
222 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP0]] to <4 x i16>
223 // CHECK: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP1]] to <4 x i16>
224 // CHECK: [[VEXT:%.*]] = shufflevector <4 x i16> [[TMP2]], <4 x i16> [[TMP3]], <4 x i32> <i32 3, i32 4, i32 5, i32 6>
225 // CHECK: ret <4 x i16> [[VEXT]]
226 poly16x4_t test_vext_p16(poly16x4_t a, poly16x4_t b) {
227 return vext_p16(a, b, 3);
230 // CHECK-LABEL: define{{.*}} <16 x i8> @test_vextq_p8(<16 x i8> noundef %a, <16 x i8> noundef %b) #0 {
231 // CHECK: [[VEXT:%.*]] = shufflevector <16 x i8> %a, <16 x i8> %b, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17>
232 // CHECK: ret <16 x i8> [[VEXT]]
233 poly8x16_t test_vextq_p8(poly8x16_t a, poly8x16_t b) {
234 return vextq_p8(a, b, 2);
237 // CHECK-LABEL: define{{.*}} <8 x i16> @test_vextq_p16(<8 x i16> noundef %a, <8 x i16> noundef %b) #0 {
238 // CHECK: [[TMP0:%.*]] = bitcast <8 x i16> %a to <16 x i8>
239 // CHECK: [[TMP1:%.*]] = bitcast <8 x i16> %b to <16 x i8>
240 // CHECK: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP0]] to <8 x i16>
241 // CHECK: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <8 x i16>
242 // CHECK: [[VEXT:%.*]] = shufflevector <8 x i16> [[TMP2]], <8 x i16> [[TMP3]], <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
243 // CHECK: ret <8 x i16> [[VEXT]]
244 poly16x8_t test_vextq_p16(poly16x8_t a, poly16x8_t b) {
245 return vextq_p16(a, b, 3);