// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple armv8-arm-none-eabi \
// RUN:   -target-feature +neon -target-feature +bf16 -mfloat-abi soft \
// RUN:   -disable-O0-optnone -S -emit-llvm -o - %s \
// RUN:   | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple armv8-arm-none-eabi \
// RUN:   -target-feature +neon -target-feature +bf16 -mfloat-abi hard \
// RUN:   -disable-O0-optnone -S -emit-llvm -o - %s \
// RUN:   | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>
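
// vbfdot_f32: 2-way bfloat16 dot product. Each f32 lane of the accumulator r
// receives the sum of products of the corresponding pair of bf16 elements of
// a and b: r[i] += a[2i]*b[2i] + a[2i+1]*b[2i+1].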
// CHECK-LABEL: @test_vbfdot_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <2 x float> [[R:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x bfloat> [[B:%.*]] to <8 x i8>
// CHECK-NEXT: [[VBFDOT3_I:%.*]] = call <2 x float> @llvm.arm.neon.bfdot.v2f32.v4bf16(<2 x float> [[R]], <4 x bfloat> [[A]], <4 x bfloat> [[B]])
// CHECK-NEXT: ret <2 x float> [[VBFDOT3_I]]
float32x2_t test_vbfdot_f32(float32x2_t r, bfloat16x4_t a, bfloat16x4_t b) {
  return vbfdot_f32(r, a, b);
}
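
// vbfdotq_f32: 128-bit form of the 2-way bf16 dot product (four f32
// accumulator lanes instead of two).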
// CHECK-LABEL: @test_vbfdotq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[VBFDOT3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfdot.v4f32.v8bf16(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[B]])
// CHECK-NEXT: ret <4 x float> [[VBFDOT3_I]]
float32x4_t test_vbfdotq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfdotq_f32(r, a, b);
}
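
// vbfdot_lane_f32: one 32-bit pair of bf16 elements is selected from the
// 64-bit b (lane 0 here; valid range 0-1) and reused for every accumulator
// lane. The bitcast-through-<2 x float> plus shufflevector sequence below is
// how the unoptimized IR expresses that pair-wise broadcast.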
// CHECK-LABEL: @test_vbfdot_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_144:%.*]] = alloca <4 x bfloat>, align 8
// CHECK-NEXT: [[__REINT1_144:%.*]] = alloca <2 x float>, align 8
// CHECK-NEXT: store <4 x bfloat> [[B:%.*]], ptr [[__REINT_144]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[__REINT_144]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8>
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
// CHECK-NEXT: [[LANE:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP3]], <2 x i32> zeroinitializer
// CHECK-NEXT: store <2 x float> [[LANE]], ptr [[__REINT1_144]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x bfloat>, ptr [[__REINT1_144]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = bitcast <2 x float> [[R:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x bfloat> [[TMP5]] to <8 x i8>
// CHECK-NEXT: [[VBFDOT3_I:%.*]] = call <2 x float> @llvm.arm.neon.bfdot.v2f32.v4bf16(<2 x float> [[R]], <4 x bfloat> [[A]], <4 x bfloat> [[TMP5]])
// CHECK-NEXT: ret <2 x float> [[VBFDOT3_I]]
float32x2_t test_vbfdot_lane_f32(float32x2_t r, bfloat16x4_t a, bfloat16x4_t b) {
  return vbfdot_lane_f32(r, a, b, 0);
}
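
// vbfdotq_laneq_f32: 128-bit accumulator; the bf16 pair is selected from a
// 128-bit b (lane 3 here; valid range 0-3).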
// CHECK-LABEL: @test_vbfdotq_laneq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_146:%.*]] = alloca <8 x bfloat>, align 8
// CHECK-NEXT: [[__REINT1_146:%.*]] = alloca <4 x float>, align 8
// CHECK-NEXT: store <8 x bfloat> [[B:%.*]], ptr [[__REINT_146]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[__REINT_146]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to <16 x i8>
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
// CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> [[TMP3]], <4 x i32> <i32 3, i32 3, i32 3, i32 3>
// CHECK-NEXT: store <4 x float> [[LANE]], ptr [[__REINT1_146]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x bfloat>, ptr [[__REINT1_146]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x bfloat> [[TMP5]] to <16 x i8>
// CHECK-NEXT: [[VBFDOT3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfdot.v4f32.v8bf16(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[TMP5]])
// CHECK-NEXT: ret <4 x float> [[VBFDOT3_I]]
float32x4_t test_vbfdotq_laneq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfdotq_laneq_f32(r, a, b, 3);
}
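
// vbfdot_laneq_f32: mixed form; 64-bit accumulator with the bf16 pair taken
// from a 128-bit b (valid lane range 0-3).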
// CHECK-LABEL: @test_vbfdot_laneq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_148:%.*]] = alloca <8 x bfloat>, align 8
// CHECK-NEXT: [[__REINT1_148:%.*]] = alloca <2 x float>, align 8
// CHECK-NEXT: store <8 x bfloat> [[B:%.*]], ptr [[__REINT_148]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[__REINT_148]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <4 x float> [[TMP1]] to <16 x i8>
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP2]] to <4 x float>
// CHECK-NEXT: [[LANE:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> [[TMP3]], <2 x i32> <i32 3, i32 3>
// CHECK-NEXT: store <2 x float> [[LANE]], ptr [[__REINT1_148]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load <4 x bfloat>, ptr [[__REINT1_148]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = bitcast <2 x float> [[R:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP7:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x bfloat> [[TMP5]] to <8 x i8>
// CHECK-NEXT: [[VBFDOT3_I:%.*]] = call <2 x float> @llvm.arm.neon.bfdot.v2f32.v4bf16(<2 x float> [[R]], <4 x bfloat> [[A]], <4 x bfloat> [[TMP5]])
// CHECK-NEXT: ret <2 x float> [[VBFDOT3_I]]
float32x2_t test_vbfdot_laneq_f32(float32x2_t r, bfloat16x4_t a, bfloat16x8_t b) {
  return vbfdot_laneq_f32(r, a, b, 3);
}
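
// vbfdotq_lane_f32: the other mixed form; 128-bit accumulator with the bf16
// pair taken from a 64-bit b (valid lane range 0-1).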
// CHECK-LABEL: @test_vbfdotq_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[__REINT_142:%.*]] = alloca <4 x bfloat>, align 8
// CHECK-NEXT: [[__REINT1_142:%.*]] = alloca <4 x float>, align 8
// CHECK-NEXT: store <4 x bfloat> [[B:%.*]], ptr [[__REINT_142]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[__REINT_142]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <2 x float> [[TMP1]] to <8 x i8>
// CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8> [[TMP2]] to <2 x float>
// CHECK-NEXT: [[LANE:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> [[TMP3]], <4 x i32> zeroinitializer
// CHECK-NEXT: store <4 x float> [[LANE]], ptr [[__REINT1_142]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = load <8 x bfloat>, ptr [[__REINT1_142]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP8:%.*]] = bitcast <8 x bfloat> [[TMP5]] to <16 x i8>
// CHECK-NEXT: [[VBFDOT3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfdot.v4f32.v8bf16(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[TMP5]])
// CHECK-NEXT: ret <4 x float> [[VBFDOT3_I]]
float32x4_t test_vbfdotq_lane_f32(float32x4_t r, bfloat16x8_t a, bfloat16x4_t b) {
  return vbfdotq_lane_f32(r, a, b, 0);
}
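
// vbfmmlaq_f32: bf16 matrix multiply-accumulate. a and b are viewed as 2x4
// bf16 matrices and r as a 2x2 f32 matrix; the operation computes
// r += a * transpose(b).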
// CHECK-LABEL: @test_vbfmmlaq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[VBFMMLAQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmmla(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[B]])
// CHECK-NEXT: [[VBFMMLAQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMMLAQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMMLAQ_V3_I]]
float32x4_t test_vbfmmlaq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfmmlaq_f32(r, a, b);
}
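
// vbfmlalbq_f32: widening multiply-accumulate of the even-indexed ("bottom")
// bf16 elements of a and b into the four f32 lanes of r.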
// CHECK-LABEL: @test_vbfmlalbq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[VBFMLALBQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmlalb(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[B]])
// CHECK-NEXT: [[VBFMLALBQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMLALBQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMLALBQ_V3_I]]
float32x4_t test_vbfmlalbq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfmlalbq_f32(r, a, b);
}
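
// vbfmlaltq_f32: as above, but using the odd-indexed ("top") bf16 elements.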
// CHECK-LABEL: @test_vbfmlaltq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[B:%.*]] to <16 x i8>
// CHECK-NEXT: [[VBFMLALTQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmlalt(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[B]])
// CHECK-NEXT: [[VBFMLALTQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMLALTQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMLALTQ_V3_I]]
float32x4_t test_vbfmlaltq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfmlaltq_f32(r, a, b);
}
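
// vbfmlalbq_lane_f32: a single bf16 element of the 64-bit b (lane 0 here;
// valid range 0-3) is broadcast to all eight positions before the bottom
// widening multiply-accumulate; the insertelement chain below is that
// broadcast prior to optimization.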
// CHECK-LABEL: @test_vbfmlalbq_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[B:%.*]], i32 0
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x bfloat> undef, bfloat [[VGET_LANE]], i32 0
// CHECK-NEXT: [[VGET_LANE3:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT5:%.*]] = insertelement <8 x bfloat> [[VECINIT]], bfloat [[VGET_LANE3]], i32 1
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT10:%.*]] = insertelement <8 x bfloat> [[VECINIT5]], bfloat [[VGET_LANE8]], i32 2
// CHECK-NEXT: [[VGET_LANE13:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT15:%.*]] = insertelement <8 x bfloat> [[VECINIT10]], bfloat [[VGET_LANE13]], i32 3
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT20:%.*]] = insertelement <8 x bfloat> [[VECINIT15]], bfloat [[VGET_LANE18]], i32 4
// CHECK-NEXT: [[VGET_LANE23:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT25:%.*]] = insertelement <8 x bfloat> [[VECINIT20]], bfloat [[VGET_LANE23]], i32 5
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT30:%.*]] = insertelement <8 x bfloat> [[VECINIT25]], bfloat [[VGET_LANE28]], i32 6
// CHECK-NEXT: [[VGET_LANE33:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT35:%.*]] = insertelement <8 x bfloat> [[VECINIT30]], bfloat [[VGET_LANE33]], i32 7
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[VECINIT35]] to <16 x i8>
// CHECK-NEXT: [[VBFMLALBQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmlalb(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[VECINIT35]])
// CHECK-NEXT: [[VBFMLALBQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMLALBQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMLALBQ_V3_I]]
float32x4_t test_vbfmlalbq_lane_f32(float32x4_t r, bfloat16x8_t a, bfloat16x4_t b) {
  return vbfmlalbq_lane_f32(r, a, b, 0);
}
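
// vbfmlalbq_laneq_f32: same as the _lane_ form, but the element is selected
// from a 128-bit b (lane 3 here; valid range 0-7).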
// CHECK-LABEL: @test_vbfmlalbq_laneq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <8 x bfloat> [[B:%.*]], i32 3
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x bfloat> undef, bfloat [[VGET_LANE]], i32 0
// CHECK-NEXT: [[VGET_LANE3:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT5:%.*]] = insertelement <8 x bfloat> [[VECINIT]], bfloat [[VGET_LANE3]], i32 1
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT10:%.*]] = insertelement <8 x bfloat> [[VECINIT5]], bfloat [[VGET_LANE8]], i32 2
// CHECK-NEXT: [[VGET_LANE13:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT15:%.*]] = insertelement <8 x bfloat> [[VECINIT10]], bfloat [[VGET_LANE13]], i32 3
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT20:%.*]] = insertelement <8 x bfloat> [[VECINIT15]], bfloat [[VGET_LANE18]], i32 4
// CHECK-NEXT: [[VGET_LANE23:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT25:%.*]] = insertelement <8 x bfloat> [[VECINIT20]], bfloat [[VGET_LANE23]], i32 5
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT30:%.*]] = insertelement <8 x bfloat> [[VECINIT25]], bfloat [[VGET_LANE28]], i32 6
// CHECK-NEXT: [[VGET_LANE33:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT35:%.*]] = insertelement <8 x bfloat> [[VECINIT30]], bfloat [[VGET_LANE33]], i32 7
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[VECINIT35]] to <16 x i8>
// CHECK-NEXT: [[VBFMLALBQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmlalb(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[VECINIT35]])
// CHECK-NEXT: [[VBFMLALBQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMLALBQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMLALBQ_V3_I]]
float32x4_t test_vbfmlalbq_laneq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfmlalbq_laneq_f32(r, a, b, 3);
}
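
// vbfmlaltq_lane_f32: top-half counterpart of vbfmlalbq_lane_f32 (valid lane
// range 0-3).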
// CHECK-LABEL: @test_vbfmlaltq_lane_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <4 x bfloat> [[B:%.*]], i32 0
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x bfloat> undef, bfloat [[VGET_LANE]], i32 0
// CHECK-NEXT: [[VGET_LANE3:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT5:%.*]] = insertelement <8 x bfloat> [[VECINIT]], bfloat [[VGET_LANE3]], i32 1
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT10:%.*]] = insertelement <8 x bfloat> [[VECINIT5]], bfloat [[VGET_LANE8]], i32 2
// CHECK-NEXT: [[VGET_LANE13:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT15:%.*]] = insertelement <8 x bfloat> [[VECINIT10]], bfloat [[VGET_LANE13]], i32 3
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT20:%.*]] = insertelement <8 x bfloat> [[VECINIT15]], bfloat [[VGET_LANE18]], i32 4
// CHECK-NEXT: [[VGET_LANE23:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT25:%.*]] = insertelement <8 x bfloat> [[VECINIT20]], bfloat [[VGET_LANE23]], i32 5
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT30:%.*]] = insertelement <8 x bfloat> [[VECINIT25]], bfloat [[VGET_LANE28]], i32 6
// CHECK-NEXT: [[VGET_LANE33:%.*]] = extractelement <4 x bfloat> [[B]], i32 0
// CHECK-NEXT: [[VECINIT35:%.*]] = insertelement <8 x bfloat> [[VECINIT30]], bfloat [[VGET_LANE33]], i32 7
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[VECINIT35]] to <16 x i8>
// CHECK-NEXT: [[VBFMLALTQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmlalt(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[VECINIT35]])
// CHECK-NEXT: [[VBFMLALTQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMLALTQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMLALTQ_V3_I]]
float32x4_t test_vbfmlaltq_lane_f32(float32x4_t r, bfloat16x8_t a, bfloat16x4_t b) {
  return vbfmlaltq_lane_f32(r, a, b, 0);
}
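
// vbfmlaltq_laneq_f32: top-half counterpart of vbfmlalbq_laneq_f32 (valid
// lane range 0-7).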
// CHECK-LABEL: @test_vbfmlaltq_laneq_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[VGET_LANE:%.*]] = extractelement <8 x bfloat> [[B:%.*]], i32 3
// CHECK-NEXT: [[VECINIT:%.*]] = insertelement <8 x bfloat> undef, bfloat [[VGET_LANE]], i32 0
// CHECK-NEXT: [[VGET_LANE3:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT5:%.*]] = insertelement <8 x bfloat> [[VECINIT]], bfloat [[VGET_LANE3]], i32 1
// CHECK-NEXT: [[VGET_LANE8:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT10:%.*]] = insertelement <8 x bfloat> [[VECINIT5]], bfloat [[VGET_LANE8]], i32 2
// CHECK-NEXT: [[VGET_LANE13:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT15:%.*]] = insertelement <8 x bfloat> [[VECINIT10]], bfloat [[VGET_LANE13]], i32 3
// CHECK-NEXT: [[VGET_LANE18:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT20:%.*]] = insertelement <8 x bfloat> [[VECINIT15]], bfloat [[VGET_LANE18]], i32 4
// CHECK-NEXT: [[VGET_LANE23:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT25:%.*]] = insertelement <8 x bfloat> [[VECINIT20]], bfloat [[VGET_LANE23]], i32 5
// CHECK-NEXT: [[VGET_LANE28:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT30:%.*]] = insertelement <8 x bfloat> [[VECINIT25]], bfloat [[VGET_LANE28]], i32 6
// CHECK-NEXT: [[VGET_LANE33:%.*]] = extractelement <8 x bfloat> [[B]], i32 3
// CHECK-NEXT: [[VECINIT35:%.*]] = insertelement <8 x bfloat> [[VECINIT30]], bfloat [[VGET_LANE33]], i32 7
// CHECK-NEXT: [[TMP0:%.*]] = bitcast <4 x float> [[R:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x bfloat> [[VECINIT35]] to <16 x i8>
// CHECK-NEXT: [[VBFMLALTQ_V3_I:%.*]] = call <4 x float> @llvm.arm.neon.bfmlalt(<4 x float> [[R]], <8 x bfloat> [[A]], <8 x bfloat> [[VECINIT35]])
// CHECK-NEXT: [[VBFMLALTQ_V4_I:%.*]] = bitcast <4 x float> [[VBFMLALTQ_V3_I]] to <16 x i8>
// CHECK-NEXT: ret <4 x float> [[VBFMLALTQ_V3_I]]
float32x4_t test_vbfmlaltq_laneq_f32(float32x4_t r, bfloat16x8_t a, bfloat16x8_t b) {
  return vbfmlaltq_laneq_f32(r, a, b, 3);
}