//===- AArch64LegalizerInfo.cpp ----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file implements the targeting of the MachineLegalizer class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64LegalizerInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

AArch64LegalizerInfo::AArch64LegalizerInfo(const AArch64Subtarget &ST) {
  using namespace TargetOpcode;
  const LLT p0 = LLT::pointer(0, 64);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT s128 = LLT::scalar(128);
  const LLT s256 = LLT::scalar(256);
  const LLT s512 = LLT::scalar(512);
  const LLT v16s8 = LLT::vector(16, 8);
  const LLT v8s8 = LLT::vector(8, 8);
  const LLT v4s8 = LLT::vector(4, 8);
  const LLT v8s16 = LLT::vector(8, 16);
  const LLT v4s16 = LLT::vector(4, 16);
  const LLT v2s16 = LLT::vector(2, 16);
  const LLT v2s32 = LLT::vector(2, 32);
  const LLT v4s32 = LLT::vector(4, 32);
  const LLT v2s64 = LLT::vector(2, 64);
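
  // Naming note: sN is an N-bit scalar, vMsN is a vector of M N-bit elements,
  // and p0 is a pointer in address space 0, which is 64 bits wide on AArch64.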

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({p0, s1, s8, s16, s32, s64, v2s64})
      .clampScalar(0, s1, s64)
      .widenScalarToNextPow2(0, 8)
      // Break up any vector that is not exactly <2 x s64>: s64 elements are
      // clamped to v2s64, anything else is scalarized.
      .fewerElementsIf(
          [=](const LegalityQuery &Query) {
            return Query.Types[0].isVector() &&
                   (Query.Types[0].getElementType() != s64 ||
                    Query.Types[0].getNumElements() != 2);
          },
          [=](const LegalityQuery &Query) {
            LLT EltTy = Query.Types[0].getElementType();
            if (EltTy == s64)
              return std::make_pair(0, LLT::vector(2, 64));
            return std::make_pair(0, EltTy);
          });

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s16, s32, s64})
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_BSWAP)
      .clampScalar(0, s16, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32, s64, v2s32, v4s32, v2s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0);
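
  // For example, the clampScalar/widenScalarToNextPow2 pair above turns
  //   %2:_(s8) = G_ADD %0:_(s8), %1:_(s8)
  // into (roughly) an s32 G_ADD whose inputs are G_ANYEXTs of %0 and %1 and
  // whose result is G_TRUNCed back to s8, which the legalFor set then accepts.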

  getActionDefinitionsBuilder(G_SHL)
      .legalFor({{s32, s32}, {s64, s64},
                 {v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}})
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampNumElements(0, v2s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)
      .moreElementsToNextPow2(0)
      .minScalarSameAs(1, 0);
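
  // minScalarSameAs(1, 0) widens the shift amount (type index 1) until it is
  // at least as wide as the shifted value (type index 0), e.g. an s64 shift by
  // an s32 amount becomes an s64 shift by an s64 amount.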

  getActionDefinitionsBuilder(G_GEP)
      .legalFor({{p0, s64}})
      .clampScalar(1, s64, s64);

  getActionDefinitionsBuilder(G_PTR_MASK).legalFor({p0});

  getActionDefinitionsBuilder({G_SDIV, G_UDIV})
      .legalFor({s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_LSHR, G_ASHR})
      .legalFor({{s32, s32}, {s64, s64}})
      .clampScalar(1, s32, s64)
      .clampScalar(0, s32, s64)
      .minScalarSameAs(1, 0);

  getActionDefinitionsBuilder({G_SREM, G_UREM})
      .lowerFor({s1, s8, s16, s32, s64});

  getActionDefinitionsBuilder({G_SMULO, G_UMULO})
      .lowerFor({{s64, s1}});
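
  // AArch64 has no multiply that also sets overflow flags, so s64 G_SMULO and
  // G_UMULO are lowered: the legalizer rewrites them as a plain multiply plus
  // an explicit overflow check built from a high-half multiply and a compare.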

  getActionDefinitionsBuilder({G_SMULH, G_UMULH}).legalFor({s32, s64});

  getActionDefinitionsBuilder({G_UADDE, G_USUBE, G_SADDO, G_SSUBO})
      .legalFor({{s32, s1}, {s64, s1}});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMA, G_FMUL, G_FDIV, G_FNEG})
      .legalFor({s32, s64, v2s64, v4s32, v2s32});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FCEIL, G_FABS, G_FSQRT, G_FFLOOR})
      // If we don't have full FP16 support, then scalarize the elements of
      // vectors containing fp16 types.
      .fewerElementsIf(
          [=, &ST](const LegalityQuery &Query) {
            const auto &Ty = Query.Types[0];
            return Ty.isVector() && Ty.getElementType() == s16 &&
                   !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s16); })
      // If we don't have full FP16 support, then widen s16 to s32 if we
      // encounter it.
      .widenScalarIf(
          [=, &ST](const LegalityQuery &Query) {
            return Query.Types[0] == s16 && !ST.hasFullFP16();
          },
          [=](const LegalityQuery &Query) { return std::make_pair(0, s32); })
      .legalFor({s16, s32, s64, v2s32, v4s32, v2s64, v2s16, v4s16, v8s16});
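
  // Without full FP16 support, an s16 G_FSQRT is therefore widened: the
  // operand is extended to s32 with G_FPEXT, the s32 G_FSQRT is legal, and
  // the result is truncated back with G_FPTRUNC.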

  getActionDefinitionsBuilder(
      {G_FCOS, G_FSIN, G_FLOG10, G_FLOG, G_FLOG2, G_FEXP})
      // We need a call for these, so we always need to scalarize.
      .scalarize(0)
      // Regardless of FP16 support, widen 16-bit elements to 32-bits.
      .minScalar(0, s32)
      .libcallFor({s32, s64, v2s32, v4s32, v2s64});
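
  // Each scalarized element then becomes a call into libm; e.g. an s32 G_FSIN
  // is emitted as a call to sinf and an s64 G_FSIN as a call to sin.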

  getActionDefinitionsBuilder(G_INSERT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() <= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty0 != s32 && Ty0 != s64 && Ty0 != p0)
          return false;
        return isPowerOf2_32(Ty1.getSizeInBits()) &&
               (Ty1.getSizeInBits() == 1 || Ty1.getSizeInBits() >= 8);
      })
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .maxScalarIf(typeInSet(0, {s32}), 1, s16)
      .maxScalarIf(typeInSet(0, {s64}), 1, s32)
      .widenScalarToNextPow2(1);
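
  // So inserting an s16 into an s64 is legal, while an insert whose inserted
  // value is as wide as (or wider than) the destination is rejected outright,
  // since such a G_INSERT is malformed to begin with.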

  getActionDefinitionsBuilder(G_EXTRACT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() >= Query.Types[1].getSizeInBits();
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        if (Ty1 != s32 && Ty1 != s64)
          return false;
        return isPowerOf2_32(Ty0.getSizeInBits()) &&
               (Ty0.getSizeInBits() == 1 || Ty0.getSizeInBits() >= 8);
      })
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .maxScalarIf(typeInSet(1, {s32}), 0, s16)
      .maxScalarIf(typeInSet(1, {s64}), 0, s32)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, 8, 8},
                                 {s32, p0, 16, 8},
                                 {s64, p0, 8, 8},
                                 {s64, p0, 16, 8},
                                 {s64, p0, 32, 8}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      // how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower anything left over into G_*EXT and G_LOAD
      .lower();

  getActionDefinitionsBuilder(G_LOAD)
      .legalForTypesWithMemDesc({{s8, p0, 8, 8},
                                 {s16, p0, 16, 8},
                                 {s32, p0, 32, 8},
                                 {s64, p0, 64, 8},
                                 {p0, p0, 64, 8},
                                 {v2s32, p0, 64, 8}})
      // These extends are also legal
      .legalForTypesWithMemDesc({{s32, p0, 8, 8},
                                 {s32, p0, 16, 8}})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      // how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower any any-extending loads left into G_ANYEXT and G_LOAD
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .clampMaxNumElements(0, s32, 2)
      .clampMaxNumElements(0, s64, 1);
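
  // The lowerIf above catches e.g.
  //   %0:_(s64) = G_LOAD %p(p0) :: (load 2)
  // and splits it into an s16 G_LOAD followed by a G_ANYEXT to s64.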

  getActionDefinitionsBuilder(G_STORE)
      .legalForTypesWithMemDesc({{s8, p0, 8, 8},
                                 {s16, p0, 16, 8},
                                 {s32, p0, 32, 8},
                                 {s64, p0, 64, 8},
                                 {p0, p0, 64, 8},
                                 {v2s32, p0, 64, 8}})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0)
      // TODO: We could support sum-of-pow2's but the lowering code doesn't know
      // how to do that yet.
      .unsupportedIfMemSizeNotPow2()
      // Lower truncating stores: store a narrower value instead.
      .lowerIf([=](const LegalityQuery &Query) {
        return Query.Types[0].isScalar() &&
               Query.Types[0].getSizeInBits() != Query.MMODescrs[0].SizeInBits;
      })
      .clampMaxNumElements(0, s32, 2)
      .clampMaxNumElements(0, s64, 1);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({p0, s32, s64})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64})
      .clampScalar(0, s32, s64);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{s32, s32}, {s32, s64}, {s32, p0}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalForCartesianProduct({s8, s16, s32, s64}, {s1, s8, s16, s32});
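
  // The Cartesian product makes every widening combination legal here, e.g.
  // s64 = G_SEXT s1 as well as s16 = G_ZEXT s8.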

  getActionDefinitionsBuilder(G_FPTRUNC).legalFor(
      {{s16, s32}, {s16, s64}, {s32, s64}, {v4s16, v4s32}, {v2s32, v2s64}});
  getActionDefinitionsBuilder(G_FPEXT).legalFor(
      {{s32, s16}, {s64, s16}, {s64, s32}, {v4s32, v4s16}, {v2s64, v2s32}});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalForCartesianProduct({s32, s64, v2s64, v4s32, v2s32})
      .clampScalar(1, s32, s64)
      .widenScalarToNextPow2(1)
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1, s8, s16, s32});
  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_SELECT)
      .legalFor({{s32, s1}, {s64, s1}, {p0, s1}})
      .clampScalar(0, s32, s64)
      .widenScalarToNextPow2(0);

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalForCartesianProduct({s1, s8, s16, s32, s64}, {p0})
      .maxScalar(0, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_INTTOPTR)
      // An inttoptr must not implicitly resize; the source must already be
      // pointer-sized.
      .unsupportedIf([&](const LegalityQuery &Query) {
        return Query.Types[0].getSizeInBits() != Query.Types[1].getSizeInBits();
      })
      .legalFor({{p0, s64}});

  // Casts for 32 and 64-bit width type are just copies.
  // Same for 128-bit width type, except they are on the FPR bank.
  getActionDefinitionsBuilder(G_BITCAST)
      // FIXME: This is wrong since G_BITCAST is not allowed to change the
      // number of bits but it's what the previous code described and fixing
      // it breaks tests.
      .legalForCartesianProduct({s1, s8, s16, s32, s64, s128, v16s8, v8s8, v4s8,
                                 v8s16, v4s16, v2s16, v4s32, v2s32, v2s64});

  getActionDefinitionsBuilder(G_VASTART).legalFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      .customForCartesianProduct({s8, s16, s32, s64, p0}, {p0})
      .clampScalar(0, s8, s64)
      .widenScalarToNextPow2(0, /*Min*/ 8);

  getActionDefinitionsBuilder(G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .legalIf(all(
          typeInSet(0, {s8, s16, s32, s64}), typeIs(1, s1), typeIs(2, p0),
          atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));

  getActionDefinitionsBuilder(
      {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB, G_ATOMICRMW_AND,
       G_ATOMICRMW_OR, G_ATOMICRMW_XOR, G_ATOMICRMW_MIN, G_ATOMICRMW_MAX,
       G_ATOMICRMW_UMIN, G_ATOMICRMW_UMAX, G_ATOMIC_CMPXCHG})
      .legalIf(all(
          typeInSet(0, {s8, s16, s32, s64}), typeIs(1, p0),
          atomicOrderingAtLeastOrStrongerThan(0, AtomicOrdering::Monotonic)));
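
  // In other words, an atomicrmw or cmpxchg is legal only when its value type
  // is s8/s16/s32/s64, its address is a p0 pointer, and its memory ordering is
  // monotonic or stronger; unordered atomics are not accepted here.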

  getActionDefinitionsBuilder(G_BLOCK_ADDR).legalFor({p0});

  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    // FIXME: This rule is horrible, but specifies the same as what we had
    // before with the particularly strange definitions removed (e.g.
    // s8 = G_MERGE_VALUES s32, s32).
    // Part of the complexity comes from these ops being extremely flexible. For
    // example, you can build/decompose vectors with it, concatenate vectors,
    // etc. and in addition to this you can also bitcast with it at the same
    // time. We've been considering breaking it up into multiple ops to make it
    // more manageable throughout the backend.
    getActionDefinitionsBuilder(Op)
        // Break up vectors with weird elements into scalars
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
            scalarize(0))
        .fewerElementsIf(
            [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
            scalarize(1))
        // Clamp the big scalar to s8-s512 and make it either a power of 2, 192,
        // or 384.
        .clampScalar(BigTyIdx, s8, s512)
        .widenScalarIf(
            [=](const LegalityQuery &Query) {
              const LLT &Ty = Query.Types[BigTyIdx];
              return !isPowerOf2_32(Ty.getSizeInBits()) &&
                     Ty.getSizeInBits() % 64 != 0;
            },
            [=](const LegalityQuery &Query) {
              // Pick the next power of 2, or a multiple of 64 over 128.
              // Whichever is smaller.
              const LLT &Ty = Query.Types[BigTyIdx];
              unsigned NewSizeInBits =
                  1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
              if (NewSizeInBits >= 256) {
                unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
                if (RoundedTo < NewSizeInBits)
                  NewSizeInBits = RoundedTo;
              }
              return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
            })
        // Clamp the little scalar to s8-s256 and make it a power of 2. It's not
        // worth considering the multiples of 64 since 2*192 and 2*384 are not
        // valid.
        .clampScalar(LitTyIdx, s8, s256)
        .widenScalarToNextPow2(LitTyIdx, /*Min*/ 8)
        // So at this point, we have s8, s16, s32, s64, s128, s192, s256, s384,
        // s512, <X x s8>, <X x s16>, <X x s32>, or <X x s64>.
        // At this point it's simple enough to accept the legal types.
        .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];
          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;
          return BigTy.getSizeInBits() % LitTy.getSizeInBits() == 0;
        })
        // Any vectors left are the wrong size. Scalarize them.
        .scalarize(0)
        .scalarize(1);
  }
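
  // Example: s128 = G_MERGE_VALUES s64, s64 is legal outright (128 is a
  // multiple of 64), while an s96 big type is first widened to s128 (the next
  // power of 2) before the final legalIf check accepts it.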

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
      .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[1];
        return VecTy == v4s32 || VecTy == v2s64;
      });

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
      .legalFor({{v4s16, s16},
                 {v8s16, s16},
                 {v2s32, s32},
                 {v4s32, s32},
                 {v2s64, s64}})
      .clampNumElements(0, v4s32, v4s32)
      .clampNumElements(0, v2s64, v2s64)

      // Deal with larger scalar types, which will be implicitly truncated.
      .legalIf([=](const LegalityQuery &Query) {
        return Query.Types[0].getScalarSizeInBits() <
               Query.Types[1].getSizeInBits();
      })
      .minScalarSameAs(1, 0);
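
  // The "implicitly truncated" case covers MIR like
  //   %v:_(v4s16) = G_BUILD_VECTOR %a:_(s32), %b:_(s32), %c:_(s32), %d:_(s32)
  // where each s32 source is narrowed to fit the s16 element type.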

  computeTables();
  verify(*ST.getInstrInfo());
}

bool AArch64LegalizerInfo::legalizeCustom(MachineInstr &MI,
                                          MachineRegisterInfo &MRI,
                                          MachineIRBuilder &MIRBuilder,
                                          GISelChangeObserver &Observer) const {
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_VAARG:
    return legalizeVaArg(MI, MRI, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}
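
// legalizeVaArg expands G_VAARG inline: load the current va_list pointer,
// realign it when the requested alignment exceeds the pointer size, load the
// value, advance the pointer past the (slot-aligned) value, and store the
// updated pointer back to the list.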

bool AArch64LegalizerInfo::legalizeVaArg(MachineInstr &MI,
                                         MachineRegisterInfo &MRI,
                                         MachineIRBuilder &MIRBuilder) const {
  MIRBuilder.setInstr(MI);
  MachineFunction &MF = MIRBuilder.getMF();
  unsigned Align = MI.getOperand(2).getImm();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned ListPtr = MI.getOperand(1).getReg();

  LLT PtrTy = MRI.getType(ListPtr);
  LLT IntPtrTy = LLT::scalar(PtrTy.getSizeInBits());

  const unsigned PtrSize = PtrTy.getSizeInBits() / 8;
  unsigned List = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildLoad(
      List, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               PtrSize, /* Align = */ PtrSize));

  unsigned DstPtr;
  if (Align > PtrSize) {
    // Realign the list to the actual required alignment.
    auto AlignMinus1 = MIRBuilder.buildConstant(IntPtrTy, Align - 1);

    unsigned ListTmp = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildGEP(ListTmp, List, AlignMinus1.getReg(0));

    DstPtr = MRI.createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildPtrMask(DstPtr, ListTmp, Log2_64(Align));
  } else
    DstPtr = List;

  uint64_t ValSize = MRI.getType(Dst).getSizeInBits() / 8;
  MIRBuilder.buildLoad(
      Dst, DstPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOLoad,
                               ValSize, std::max(Align, PtrSize)));

  unsigned SizeReg = MRI.createGenericVirtualRegister(IntPtrTy);
  MIRBuilder.buildConstant(SizeReg, alignTo(ValSize, PtrSize));

  unsigned NewList = MRI.createGenericVirtualRegister(PtrTy);
  MIRBuilder.buildGEP(NewList, DstPtr, SizeReg);

  MIRBuilder.buildStore(
      NewList, ListPtr,
      *MF.getMachineMemOperand(MachinePointerInfo(), MachineMemOperand::MOStore,
                               PtrSize, /* Align = */ PtrSize));

  MI.eraseFromParent();
  return true;
}