//===- MachineIRBuilderTest.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
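
// Each test below drives the fixture's MachineIRBuilder `B` to emit generic
// MIR into `MF`, then verifies the printed function against FileCheck-style
// patterns via CheckMachineFunction.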

TEST_F(AArch64GISelMITest, TestBuildConstantFConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  B.buildConstant(LLT::scalar(32), 42);
  B.buildFConstant(LLT::scalar(32), 1.0);

  B.buildConstant(LLT::fixed_vector(2, 32), 99);
  B.buildFConstant(LLT::fixed_vector(2, 32), 2.0);

  // Test APFloat overload.
  APFloat KVal(APFloat::IEEEdouble(), "4.0");
  B.buildFConstant(LLT::scalar(64), KVal);

  auto CheckStr = R"(
  CHECK: [[CONST0:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
  CHECK: [[FCONST0:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  CHECK: [[CONST1:%[0-9]+]]:_(s32) = G_CONSTANT i32 99
  CHECK: [[VEC0:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[CONST1]]:_(s32), [[CONST1]]:_(s32)
  CHECK: [[FCONST1:%[0-9]+]]:_(s32) = G_FCONSTANT float 2.000000e+00
  CHECK: [[VEC1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[FCONST1]]:_(s32), [[FCONST1]]:_(s32)
  CHECK: [[FCONST2:%[0-9]+]]:_(s64) = G_FCONSTANT double 4.000000e+00
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
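
// The death tests below fire only in asserts-enabled builds: the
// "#ifndef NDEBUG" guard skips them when the size assertions they rely on
// are compiled out.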

#ifdef GTEST_HAS_DEATH_TEST
#ifndef NDEBUG

TEST_F(AArch64GISelMITest, TestBuildConstantFConstantDeath) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLVMContext &Ctx = MF->getFunction().getContext();
  APInt APV32(32, 12345);

  // The APInt version breaks when the value width mismatches the type.
  EXPECT_DEATH(B.buildConstant(LLT::scalar(16), APV32),
               "creating constant with the wrong size");
  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), APV32),
               "creating constant with the wrong size");

  // The ConstantInt version breaks the same way.
  ConstantInt *CI = ConstantInt::get(Ctx, APV32);
  EXPECT_DEATH(B.buildConstant(LLT::scalar(16), *CI),
               "creating constant with the wrong size");
  EXPECT_DEATH(B.buildConstant(LLT::fixed_vector(2, 16), *CI),
               "creating constant with the wrong size");

  APFloat DoubleVal(APFloat::IEEEdouble());
  ConstantFP *CF = ConstantFP::get(Ctx, DoubleVal);
  EXPECT_DEATH(B.buildFConstant(LLT::scalar(16), *CF),
               "creating fconstant with the wrong size");
  EXPECT_DEATH(B.buildFConstant(LLT::fixed_vector(2, 16), *CF),
               "creating fconstant with the wrong size");
}

#endif
#endif

TEST_F(AArch64GISelMITest, DstOpSrcOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);

  // Test that SrcOp and DstOp can be constructed directly from a
  // MachineOperand by copying the instruction.
  B.buildAdd(MIBAdd->getOperand(0), MIBAdd->getOperand(1),
             MIBAdd->getOperand(2));

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[ADD]]:_(s64) = G_ADD [[COPY0]]:_, [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
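
// buildUnmerge infers the number of results from the size ratio of source
// and result types: one s64 source unmerges into two s32 or four s16 values.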

TEST_F(AArch64GISelMITest, BuildUnmerge) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);
  B.buildUnmerge(LLT::scalar(32), Copies[0]);
  B.buildUnmerge(LLT::scalar(16), Copies[1]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[UNMERGE32_0:%[0-9]+]]:_(s32), [[UNMERGE32_1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY0]]
  ; CHECK: [[UNMERGE16_0:%[0-9]+]]:_(s16), [[UNMERGE16_1:%[0-9]+]]:_(s16), [[UNMERGE16_2:%[0-9]+]]:_(s16), [[UNMERGE16_3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]]
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, TestBuildFPInsts) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  LLT S64 = LLT::scalar(64);

  B.buildFAdd(S64, Copies[0], Copies[1]);
  B.buildFSub(S64, Copies[0], Copies[1]);
  B.buildFMA(S64, Copies[0], Copies[1], Copies[2]);
  B.buildFMAD(S64, Copies[0], Copies[1], Copies[2]);
  B.buildFMAD(S64, Copies[0], Copies[1], Copies[2], MachineInstr::FmNoNans);
  B.buildFNeg(S64, Copies[0]);
  B.buildFAbs(S64, Copies[0]);
  B.buildFCopysign(S64, Copies[0], Copies[1]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[COPY0]]:_, [[COPY1]]:_, [[COPY2]]:_
  ; CHECK: [[FMAD0:%[0-9]+]]:_(s64) = G_FMAD [[COPY0]]:_, [[COPY1]]:_, [[COPY2]]:_
  ; CHECK: [[FMAD1:%[0-9]+]]:_(s64) = nnan G_FMAD [[COPY0]]:_, [[COPY1]]:_, [[COPY2]]:_
  ; CHECK: [[FNEG:%[0-9]+]]:_(s64) = G_FNEG [[COPY0]]:_
  ; CHECK: [[FABS:%[0-9]+]]:_(s64) = G_FABS [[COPY0]]:_
  ; CHECK: [[FCOPYSIGN:%[0-9]+]]:_(s64) = G_FCOPYSIGN [[COPY0]]:_, [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
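
// buildIntrinsic returns a MachineInstrBuilder, so operands beyond the
// intrinsic ID can be appended with addUse, as done below.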

TEST_F(AArch64GISelMITest, BuildIntrinsic) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  // Make sure the DstOp version works. sqrt is just a placeholder intrinsic.
  B.buildIntrinsic(Intrinsic::sqrt, {S64}).addUse(Copies[0]);

  // Make sure the register version works.
  SmallVector<Register, 1> Results;
  Results.push_back(MRI->createGenericVirtualRegister(S64));
  B.buildIntrinsic(Intrinsic::sqrt, Results).addUse(Copies[1]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[SQRT0:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.sqrt), [[COPY0]]:_
  ; CHECK: [[SQRT1:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.sqrt), [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
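
// There is no dedicated "not" opcode: buildNot emits G_XOR against an
// all-ones constant of the destination type, visible in the checks below.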

TEST_F(AArch64GISelMITest, BuildXor) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S64 = LLT::scalar(64);
  LLT S128 = LLT::scalar(128);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);
  B.buildXor(S64, Copies[0], Copies[1]);
  B.buildNot(S64, Copies[0]);

  // Make sure this works with > 64-bit types.
  auto Merge = B.buildMergeLikeInstr(S128, {Copies[0], Copies[1]});
  B.buildNot(S128, Merge);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[XOR0:%[0-9]+]]:_(s64) = G_XOR [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[NEGONE64:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
  ; CHECK: [[XOR1:%[0-9]+]]:_(s64) = G_XOR [[COPY0]]:_, [[NEGONE64]]:_
  ; CHECK: [[MERGE:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY0]]:_(s64), [[COPY1]]:_(s64)
  ; CHECK: [[NEGONE128:%[0-9]+]]:_(s128) = G_CONSTANT i128 -1
  ; CHECK: [[XOR2:%[0-9]+]]:_(s128) = G_XOR [[MERGE]]:_, [[NEGONE128]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildBitCounts) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S32 = LLT::scalar(32);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildCTPOP(S32, Copies[0]);
  B.buildCTLZ(S32, Copies[0]);
  B.buildCTLZ_ZERO_UNDEF(S32, Copies[1]);
  B.buildCTTZ(S32, Copies[0]);
  B.buildCTTZ_ZERO_UNDEF(S32, Copies[1]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[CTPOP:%[0-9]+]]:_(s32) = G_CTPOP [[COPY0]]:_
  ; CHECK: [[CTLZ0:%[0-9]+]]:_(s32) = G_CTLZ [[COPY0]]:_
  ; CHECK: [[CTLZ_UNDEF0:%[0-9]+]]:_(s32) = G_CTLZ_ZERO_UNDEF [[COPY1]]:_
  ; CHECK: [[CTTZ:%[0-9]+]]:_(s32) = G_CTTZ [[COPY0]]:_
  ; CHECK: [[CTTZ_UNDEF0:%[0-9]+]]:_(s32) = G_CTTZ_ZERO_UNDEF [[COPY1]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildCasts) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S32 = LLT::scalar(32);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildUITOFP(S32, Copies[0]);
  B.buildSITOFP(S32, Copies[0]);
  B.buildFPTOUI(S32, Copies[0]);
  B.buildFPTOSI(S32, Copies[0]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[COPY0]]:_
  ; CHECK: [[SITOFP:%[0-9]+]]:_(s32) = G_SITOFP [[COPY0]]:_
  ; CHECK: [[FPTOUI:%[0-9]+]]:_(s32) = G_FPTOUI [[COPY0]]:_
  ; CHECK: [[FPTOSI:%[0-9]+]]:_(s32) = G_FPTOSI [[COPY0]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildMinMaxAbs) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildSMin(S64, Copies[0], Copies[1]);
  B.buildSMax(S64, Copies[0], Copies[1]);
  B.buildUMin(S64, Copies[0], Copies[1]);
  B.buildUMax(S64, Copies[0], Copies[1]);
  B.buildAbs(S64, Copies[0]);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[SMIN0:%[0-9]+]]:_(s64) = G_SMIN [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[SMAX0:%[0-9]+]]:_(s64) = G_SMAX [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UMIN0:%[0-9]+]]:_(s64) = G_UMIN [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UMAX0:%[0-9]+]]:_(s64) = G_UMAX [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UABS0:%[0-9]+]]:_(s64) = G_ABS [[COPY0]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
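
// The MachineMemOperand carries the atomic ordering, size, and alignment;
// it is what prints as ":: (load store unordered (s64))" in the checks.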

TEST_F(AArch64GISelMITest, BuildAtomicRMW) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S64 = LLT::scalar(64);
  LLT P0 = LLT::pointer(0, 64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, 8, Align(8),
      AAMDNodes(), nullptr, SyncScope::System, AtomicOrdering::Unordered);

  auto Ptr = B.buildUndef(P0);
  B.buildAtomicRMWFAdd(S64, Ptr, Copies[0], *MMO);
  B.buildAtomicRMWFSub(S64, Ptr, Copies[0], *MMO);

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[PTR:%[0-9]+]]:_(p0) = G_IMPLICIT_DEF
  ; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_ATOMICRMW_FADD [[PTR]]:_(p0), [[COPY0]]:_ :: (load store unordered (s64))
  ; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_ATOMICRMW_FSUB [[PTR]]:_(p0), [[COPY0]]:_ :: (load store unordered (s64))
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
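
// buildMergeLikeInstr picks its opcode from the destination and source
// types: G_MERGE_VALUES for a scalar destination, G_BUILD_VECTOR for a
// vector built from scalars, and G_CONCAT_VECTORS for a vector built from
// smaller vectors.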

TEST_F(AArch64GISelMITest, BuildMergeLikeInstr) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S32 = LLT::scalar(32);
  Register RegC0 = B.buildConstant(S32, 0).getReg(0);
  Register RegC1 = B.buildConstant(S32, 1).getReg(0);
  Register RegC2 = B.buildConstant(S32, 2).getReg(0);
  Register RegC3 = B.buildConstant(S32, 3).getReg(0);

  // Merging plain constants as one big blob of bits should produce a
  // G_MERGE_VALUES.
  B.buildMergeLikeInstr(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});
  // Merging plain constants to a vector should produce a G_BUILD_VECTOR.
  LLT V2x32 = LLT::fixed_vector(2, 32);
  Register RegC0C1 = B.buildMergeLikeInstr(V2x32, {RegC0, RegC1}).getReg(0);
  Register RegC2C3 = B.buildMergeLikeInstr(V2x32, {RegC2, RegC3}).getReg(0);
  // Merging vector constants to a vector should produce a G_CONCAT_VECTORS.
  B.buildMergeLikeInstr(LLT::fixed_vector(4, 32), {RegC0C1, RegC2C3});
  // Merging vector constants to a plain type is not allowed.
  // Nothing else to test.

  auto CheckStr = R"(
  ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
  ; CHECK: {{%[0-9]+}}:_(s128) = G_MERGE_VALUES [[C0]]:_(s32), [[C1]]:_(s32), [[C2]]:_(s32), [[C3]]:_(s32)
  ; CHECK: [[LOW2x32:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C0]]:_(s32), [[C1]]:_(s32)
  ; CHECK: [[HIGH2x32:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C2]]:_(s32), [[C3]]:_(s32)
  ; CHECK: {{%[0-9]+}}:_(<4 x s32>) = G_CONCAT_VECTORS [[LOW2x32]]:_(<2 x s32>), [[HIGH2x32]]:_(<2 x s32>)
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
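
// Unlike buildMergeLikeInstr, buildMergeValues only ever emits
// G_MERGE_VALUES and asserts on vector destinations, which the death test
// below exercises.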

using MachineIRBuilderDeathTest = AArch64GISelMITest;

TEST_F(MachineIRBuilderDeathTest, BuildMergeValues) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S32 = LLT::scalar(32);
  Register RegC0 = B.buildConstant(S32, 0).getReg(0);
  Register RegC1 = B.buildConstant(S32, 1).getReg(0);
  Register RegC2 = B.buildConstant(S32, 2).getReg(0);
  Register RegC3 = B.buildConstant(S32, 3).getReg(0);

  // Merging scalar constants should produce a G_MERGE_VALUES.
  B.buildMergeValues(LLT::scalar(128), {RegC0, RegC1, RegC2, RegC3});

  // Using a vector destination type should assert.
  LLT V2x32 = LLT::fixed_vector(2, 32);
  EXPECT_DEBUG_DEATH(
      B.buildMergeValues(V2x32, {RegC0, RegC1}),
      "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");

  auto CheckStr = R"(
  ; CHECK: [[C0:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 3
  ; CHECK: {{%[0-9]+}}:_(s128) = G_MERGE_VALUES [[C0]]:_(s32), [[C1]]:_(s32), [[C2]]:_(s32), [[C3]]:_(s32)
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
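
// The *o builders return both the result and an s1 overflow flag; the *e
// variants additionally consume a carry-in, chained here from the flag of
// the corresponding *o instruction.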

TEST_F(AArch64GISelMITest, BuildAddoSubo) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S1 = LLT::scalar(1);
  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  auto UAddo = B.buildUAddo(S64, S1, Copies[0], Copies[1]);
  auto USubo = B.buildUSubo(S64, S1, Copies[0], Copies[1]);
  auto SAddo = B.buildSAddo(S64, S1, Copies[0], Copies[1]);
  auto SSubo = B.buildSSubo(S64, S1, Copies[0], Copies[1]);

  B.buildUAdde(S64, S1, Copies[0], Copies[1], UAddo.getReg(1));
  B.buildUSube(S64, S1, Copies[0], Copies[1], USubo.getReg(1));
  B.buildSAdde(S64, S1, Copies[0], Copies[1], SAddo.getReg(1));
  B.buildSSube(S64, S1, Copies[0], Copies[1], SSubo.getReg(1));

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[UADDO:%[0-9]+]]:_(s64), [[UADDO_FLAG:%[0-9]+]]:_(s1) = G_UADDO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[USUBO:%[0-9]+]]:_(s64), [[USUBO_FLAG:%[0-9]+]]:_(s1) = G_USUBO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[SADDO:%[0-9]+]]:_(s64), [[SADDO_FLAG:%[0-9]+]]:_(s1) = G_SADDO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[SSUBO:%[0-9]+]]:_(s64), [[SSUBO_FLAG:%[0-9]+]]:_(s1) = G_SSUBO [[COPY0]]:_, [[COPY1]]:_
  ; CHECK: [[UADDE:%[0-9]+]]:_(s64), [[UADDE_FLAG:%[0-9]+]]:_(s1) = G_UADDE [[COPY0]]:_, [[COPY1]]:_, [[UADDO_FLAG]]
  ; CHECK: [[USUBE:%[0-9]+]]:_(s64), [[USUBE_FLAG:%[0-9]+]]:_(s1) = G_USUBE [[COPY0]]:_, [[COPY1]]:_, [[USUBO_FLAG]]
  ; CHECK: [[SADDE:%[0-9]+]]:_(s64), [[SADDE_FLAG:%[0-9]+]]:_(s1) = G_SADDE [[COPY0]]:_, [[COPY1]]:_, [[SADDO_FLAG]]
  ; CHECK: [[SSUBE:%[0-9]+]]:_(s64), [[SSUBE_FLAG:%[0-9]+]]:_(s1) = G_SSUBE [[COPY0]]:_, [[COPY1]]:_, [[SSUBO_FLAG]]
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
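
// As used here, buildUbfx/buildSbfx take the source followed by the bit
// offset and the width (per the generic G_UBFX/G_SBFX operand order).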

TEST_F(AArch64GISelMITest, BuildBitfieldExtract) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S64 = LLT::scalar(64);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  auto Ubfx = B.buildUbfx(S64, Copies[0], Copies[1], Copies[2]);
  B.buildSbfx(S64, Ubfx, Copies[0], Copies[2]);

  const auto *CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK: [[UBFX:%[0-9]+]]:_(s64) = G_UBFX [[COPY0]]:_, [[COPY1]]:_(s64), [[COPY2]]:_
  ; CHECK: [[SBFX:%[0-9]+]]:_(s64) = G_SBFX [[UBFX]]:_, [[COPY0]]:_(s64), [[COPY2]]:_
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}

TEST_F(AArch64GISelMITest, BuildFPEnv) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT S32 = LLT::scalar(32);
  SmallVector<Register, 4> Copies;
  collectCopies(Copies, MF);

  B.buildGetFPEnv(Copies[0]);
  B.buildSetFPEnv(Copies[1]);
  B.buildResetFPEnv();
  auto GetFPMode = B.buildGetFPMode(S32);
  B.buildSetFPMode(GetFPMode);
  B.buildResetFPMode();

  auto CheckStr = R"(
  ; CHECK: [[COPY0:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK: [[COPY2:%[0-9]+]]:_(s64) = COPY $x2
  ; CHECK: [[COPY0]]:_(s64) = G_GET_FPENV
  ; CHECK: G_SET_FPENV [[COPY1]]:_(s64)
  ; CHECK: G_RESET_FPENV
  ; CHECK: [[FPMODE:%[0-9]+]]:_(s32) = G_GET_FPMODE
  ; CHECK: G_SET_FPMODE [[FPMODE]]:_(s32)
  ; CHECK: G_RESET_FPMODE
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}
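
// buildExtractSubvector takes the starting element index as a plain
// immediate and works for fixed and scalable vectors alike.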

TEST_F(AArch64GISelMITest, BuildExtractSubvector) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT VecTy = LLT::fixed_vector(4, 32);
  LLT SubVecTy = LLT::fixed_vector(2, 32);
  auto Vec = B.buildUndef(VecTy);
  B.buildExtractSubvector(SubVecTy, Vec, 0);

  VecTy = LLT::scalable_vector(4, 32);
  SubVecTy = LLT::scalable_vector(2, 32);
  Vec = B.buildUndef(VecTy);
  B.buildExtractSubvector(SubVecTy, Vec, 0);

  auto CheckStr = R"(
  ; CHECK: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
  ; CHECK: [[EXTRACT_SUBVECTOR:%[0-9]+]]:_(<2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF]]:_(<4 x s32>), 0
  ; CHECK: [[DEF1:%[0-9]+]]:_(<vscale x 4 x s32>) = G_IMPLICIT_DEF
  ; CHECK: [[EXTRACT_SUBVECTOR1:%[0-9]+]]:_(<vscale x 2 x s32>) = G_EXTRACT_SUBVECTOR [[DEF1]]:_(<vscale x 4 x s32>), 0
  )";

  EXPECT_TRUE(CheckMachineFunction(*MF, CheckStr)) << *MF;
}