1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This is the parent TargetLowering class for hardware code gen
11 /// targets.
13 //===----------------------------------------------------------------------===//
15 #include "AMDGPUISelLowering.h"
16 #include "AMDGPU.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPUMachineFunction.h"
19 #include "SIMachineFunctionInfo.h"
20 #include "llvm/CodeGen/Analysis.h"
21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/IR/DiagnosticInfo.h"
24 #include "llvm/IR/IntrinsicsAMDGPU.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/KnownBits.h"
28 #include "llvm/Target/TargetMachine.h"
30 using namespace llvm;
32 #include "AMDGPUGenCallingConv.inc"
34 static cl::opt<bool> AMDGPUBypassSlowDiv(
35 "amdgpu-bypass-slow-div",
36 cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
37 cl::init(true));
39 // Find a larger type to do a load / store of a vector with.
40 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
41 unsigned StoreSize = VT.getStoreSizeInBits();
42 if (StoreSize <= 32)
43 return EVT::getIntegerVT(Ctx, StoreSize);
45 assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
46 return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
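// For example (values derived from the rules above): v4i8 stores 32 bits and
// maps to i32; v8i16 stores 128 bits and maps to v4i32; v3i32 stores 96 bits
// and maps to v3i32.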
49 unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
50 return DAG.computeKnownBits(Op).countMaxActiveBits();
53 unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
54 // In order for this to be a signed 24-bit value, bit 23 must
55 // be the sign bit.
56 return DAG.ComputeMaxSignificantBits(Op);
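// A minimal sketch of how these helpers are typically consumed when deciding
// whether a multiply can use the 24-bit paths; the helper names below are
// illustrative, not necessarily the exact ones used elsewhere in this file.
#if 0
static bool fitsSigned24(SDValue Op, SelectionDAG &DAG) {
  // Signed 24-bit: all significant bits, including the sign bit, fit in 24.
  return AMDGPUTargetLowering::numBitsSigned(Op, DAG) <= 24;
}
static bool fitsUnsigned24(SDValue Op, SelectionDAG &DAG) {
  return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
}
#endif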
59 AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
60 const AMDGPUSubtarget &STI)
61 : TargetLowering(TM), Subtarget(&STI) {
62 // Lower floating point store/load to integer store/load to reduce the number
63 // of patterns in tablegen.
64 setOperationAction(ISD::LOAD, MVT::f32, Promote);
65 AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
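// With the pair of calls above, a node such as (f32 (load p)) is legalized as
// an i32 load followed by a bitcast back to f32, so only the integer load
// patterns need to exist in TableGen; the vector cases below work the same way.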
67 setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
68 AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
70 setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
71 AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);
73 setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
74 AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
76 setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
77 AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);
79 setOperationAction(ISD::LOAD, MVT::v6f32, Promote);
80 AddPromotedToType(ISD::LOAD, MVT::v6f32, MVT::v6i32);
82 setOperationAction(ISD::LOAD, MVT::v7f32, Promote);
83 AddPromotedToType(ISD::LOAD, MVT::v7f32, MVT::v7i32);
85 setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
86 AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
88 setOperationAction(ISD::LOAD, MVT::v9f32, Promote);
89 AddPromotedToType(ISD::LOAD, MVT::v9f32, MVT::v9i32);
91 setOperationAction(ISD::LOAD, MVT::v10f32, Promote);
92 AddPromotedToType(ISD::LOAD, MVT::v10f32, MVT::v10i32);
94 setOperationAction(ISD::LOAD, MVT::v11f32, Promote);
95 AddPromotedToType(ISD::LOAD, MVT::v11f32, MVT::v11i32);
97 setOperationAction(ISD::LOAD, MVT::v12f32, Promote);
98 AddPromotedToType(ISD::LOAD, MVT::v12f32, MVT::v12i32);
100 setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
101 AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
103 setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
104 AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);
106 setOperationAction(ISD::LOAD, MVT::i64, Promote);
107 AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
109 setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
110 AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);
112 setOperationAction(ISD::LOAD, MVT::f64, Promote);
113 AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);
115 setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
116 AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);
118 setOperationAction(ISD::LOAD, MVT::v3i64, Promote);
119 AddPromotedToType(ISD::LOAD, MVT::v3i64, MVT::v6i32);
121 setOperationAction(ISD::LOAD, MVT::v4i64, Promote);
122 AddPromotedToType(ISD::LOAD, MVT::v4i64, MVT::v8i32);
124 setOperationAction(ISD::LOAD, MVT::v3f64, Promote);
125 AddPromotedToType(ISD::LOAD, MVT::v3f64, MVT::v6i32);
127 setOperationAction(ISD::LOAD, MVT::v4f64, Promote);
128 AddPromotedToType(ISD::LOAD, MVT::v4f64, MVT::v8i32);
130 setOperationAction(ISD::LOAD, MVT::v8i64, Promote);
131 AddPromotedToType(ISD::LOAD, MVT::v8i64, MVT::v16i32);
133 setOperationAction(ISD::LOAD, MVT::v8f64, Promote);
134 AddPromotedToType(ISD::LOAD, MVT::v8f64, MVT::v16i32);
136 setOperationAction(ISD::LOAD, MVT::v16i64, Promote);
137 AddPromotedToType(ISD::LOAD, MVT::v16i64, MVT::v32i32);
139 setOperationAction(ISD::LOAD, MVT::v16f64, Promote);
140 AddPromotedToType(ISD::LOAD, MVT::v16f64, MVT::v32i32);
142 setOperationAction(ISD::LOAD, MVT::i128, Promote);
143 AddPromotedToType(ISD::LOAD, MVT::i128, MVT::v4i32);
145 // There are no 64-bit extloads. These should be done as a 32-bit extload and
146 // an extension to 64-bit.
147 for (MVT VT : MVT::integer_valuetypes())
148 setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::i64, VT,
149 Expand);
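// As a result, an extending load such as (i64 (zextload p, i32)) is split by
// legalization into a plain 32-bit load followed by a separate zero_extend to
// i64 rather than being matched as a single node.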
151 for (MVT VT : MVT::integer_valuetypes()) {
152 if (VT == MVT::i64)
153 continue;
155 for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}) {
156 setLoadExtAction(Op, VT, MVT::i1, Promote);
157 setLoadExtAction(Op, VT, MVT::i8, Legal);
158 setLoadExtAction(Op, VT, MVT::i16, Legal);
159 setLoadExtAction(Op, VT, MVT::i32, Expand);
163 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
164 for (auto MemVT :
165 {MVT::v2i8, MVT::v4i8, MVT::v2i16, MVT::v3i16, MVT::v4i16})
166 setLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD}, VT, MemVT,
167 Expand);
169 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
170 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::bf16, Expand);
171 setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
172 setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
173 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
174 setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
175 setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
176 setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);
178 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
179 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
180 setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f32, Expand);
181 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
182 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);
183 setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f32, Expand);
185 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
186 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::bf16, Expand);
187 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
188 setLoadExtAction(ISD::EXTLOAD, MVT::v3f64, MVT::v3f16, Expand);
189 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
190 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
191 setLoadExtAction(ISD::EXTLOAD, MVT::v16f64, MVT::v16f16, Expand);
193 setOperationAction(ISD::STORE, MVT::f32, Promote);
194 AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
196 setOperationAction(ISD::STORE, MVT::v2f32, Promote);
197 AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
199 setOperationAction(ISD::STORE, MVT::v3f32, Promote);
200 AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);
202 setOperationAction(ISD::STORE, MVT::v4f32, Promote);
203 AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
205 setOperationAction(ISD::STORE, MVT::v5f32, Promote);
206 AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);
208 setOperationAction(ISD::STORE, MVT::v6f32, Promote);
209 AddPromotedToType(ISD::STORE, MVT::v6f32, MVT::v6i32);
211 setOperationAction(ISD::STORE, MVT::v7f32, Promote);
212 AddPromotedToType(ISD::STORE, MVT::v7f32, MVT::v7i32);
214 setOperationAction(ISD::STORE, MVT::v8f32, Promote);
215 AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
217 setOperationAction(ISD::STORE, MVT::v9f32, Promote);
218 AddPromotedToType(ISD::STORE, MVT::v9f32, MVT::v9i32);
220 setOperationAction(ISD::STORE, MVT::v10f32, Promote);
221 AddPromotedToType(ISD::STORE, MVT::v10f32, MVT::v10i32);
223 setOperationAction(ISD::STORE, MVT::v11f32, Promote);
224 AddPromotedToType(ISD::STORE, MVT::v11f32, MVT::v11i32);
226 setOperationAction(ISD::STORE, MVT::v12f32, Promote);
227 AddPromotedToType(ISD::STORE, MVT::v12f32, MVT::v12i32);
229 setOperationAction(ISD::STORE, MVT::v16f32, Promote);
230 AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
232 setOperationAction(ISD::STORE, MVT::v32f32, Promote);
233 AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);
235 setOperationAction(ISD::STORE, MVT::i64, Promote);
236 AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
238 setOperationAction(ISD::STORE, MVT::v2i64, Promote);
239 AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);
241 setOperationAction(ISD::STORE, MVT::f64, Promote);
242 AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);
244 setOperationAction(ISD::STORE, MVT::v2f64, Promote);
245 AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);
247 setOperationAction(ISD::STORE, MVT::v3i64, Promote);
248 AddPromotedToType(ISD::STORE, MVT::v3i64, MVT::v6i32);
250 setOperationAction(ISD::STORE, MVT::v3f64, Promote);
251 AddPromotedToType(ISD::STORE, MVT::v3f64, MVT::v6i32);
253 setOperationAction(ISD::STORE, MVT::v4i64, Promote);
254 AddPromotedToType(ISD::STORE, MVT::v4i64, MVT::v8i32);
256 setOperationAction(ISD::STORE, MVT::v4f64, Promote);
257 AddPromotedToType(ISD::STORE, MVT::v4f64, MVT::v8i32);
259 setOperationAction(ISD::STORE, MVT::v8i64, Promote);
260 AddPromotedToType(ISD::STORE, MVT::v8i64, MVT::v16i32);
262 setOperationAction(ISD::STORE, MVT::v8f64, Promote);
263 AddPromotedToType(ISD::STORE, MVT::v8f64, MVT::v16i32);
265 setOperationAction(ISD::STORE, MVT::v16i64, Promote);
266 AddPromotedToType(ISD::STORE, MVT::v16i64, MVT::v32i32);
268 setOperationAction(ISD::STORE, MVT::v16f64, Promote);
269 AddPromotedToType(ISD::STORE, MVT::v16f64, MVT::v32i32);
271 setOperationAction(ISD::STORE, MVT::i128, Promote);
272 AddPromotedToType(ISD::STORE, MVT::i128, MVT::v4i32);
274 setTruncStoreAction(MVT::i64, MVT::i1, Expand);
275 setTruncStoreAction(MVT::i64, MVT::i8, Expand);
276 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
277 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
279 setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
280 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
281 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
282 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);
284 setTruncStoreAction(MVT::f32, MVT::bf16, Expand);
285 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
286 setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
287 setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
288 setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
289 setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
290 setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
291 setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);
293 setTruncStoreAction(MVT::f64, MVT::bf16, Expand);
294 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
295 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
297 setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
298 setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
300 setTruncStoreAction(MVT::v3i64, MVT::v3i32, Expand);
301 setTruncStoreAction(MVT::v3i64, MVT::v3i16, Expand);
302 setTruncStoreAction(MVT::v3f64, MVT::v3f32, Expand);
303 setTruncStoreAction(MVT::v3f64, MVT::v3f16, Expand);
305 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Expand);
306 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Expand);
307 setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
308 setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);
310 setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
311 setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);
313 setTruncStoreAction(MVT::v16f64, MVT::v16f32, Expand);
314 setTruncStoreAction(MVT::v16f64, MVT::v16f16, Expand);
315 setTruncStoreAction(MVT::v16i64, MVT::v16i16, Expand);
317 setTruncStoreAction(MVT::v16i64, MVT::v16i8, Expand);
319 setTruncStoreAction(MVT::v16i64, MVT::v16i1, Expand);
321 setOperationAction(ISD::Constant, {MVT::i32, MVT::i64}, Legal);
322 setOperationAction(ISD::ConstantFP, {MVT::f32, MVT::f64}, Legal);
324 setOperationAction({ISD::BR_JT, ISD::BRIND}, MVT::Other, Expand);
326 // This is totally unsupported, just custom lower to produce an error.
327 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
329 // Library functions. These default to Expand, but we have instructions
330 // for them.
331 setOperationAction({ISD::FCEIL, ISD::FEXP2, ISD::FPOW, ISD::FABS, ISD::FFLOOR,
332 ISD::FRINT, ISD::FTRUNC, ISD::FMINNUM, ISD::FMAXNUM},
333 MVT::f32, Legal);
335 setOperationAction(ISD::FLOG2, MVT::f32, Custom);
336 setOperationAction(ISD::FROUND, {MVT::f32, MVT::f64}, Custom);
338 setOperationAction({ISD::FLOG, ISD::FLOG10, ISD::FEXP}, MVT::f32, Custom);
340 setOperationAction(ISD::FNEARBYINT, {MVT::f16, MVT::f32, MVT::f64}, Custom);
342 setOperationAction(ISD::FROUNDEVEN, {MVT::f16, MVT::f32, MVT::f64}, Custom);
344 setOperationAction(ISD::FREM, {MVT::f16, MVT::f32, MVT::f64}, Custom);
346 if (Subtarget->has16BitInsts())
347 setOperationAction(ISD::IS_FPCLASS, {MVT::f16, MVT::f32, MVT::f64}, Legal);
348 else {
349 setOperationAction(ISD::IS_FPCLASS, {MVT::f32, MVT::f64}, Legal);
350 setOperationAction(ISD::FLOG2, MVT::f16, Custom);
353 // FIXME: These IS_FPCLASS vector fp types are marked custom so they reach the
354 // scalarization code. This can be removed once IS_FPCLASS expansion is no
355 // longer called by default unless marked custom/legal.
356 setOperationAction(
357 ISD::IS_FPCLASS,
358 {MVT::v2f16, MVT::v3f16, MVT::v4f16, MVT::v16f16, MVT::v2f32, MVT::v3f32,
359 MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32,
360 MVT::v2f64, MVT::v3f64, MVT::v4f64, MVT::v8f64, MVT::v16f64},
361 Custom);
363 // Expand to fneg + fadd.
364 setOperationAction(ISD::FSUB, MVT::f64, Expand);
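// That is, (fsub f64 a, b) is rewritten by the generic legalizer as
// (fadd f64 a, (fneg f64 b)).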
366 setOperationAction(ISD::CONCAT_VECTORS,
367 {MVT::v3i32, MVT::v3f32, MVT::v4i32, MVT::v4f32,
368 MVT::v5i32, MVT::v5f32, MVT::v6i32, MVT::v6f32,
369 MVT::v7i32, MVT::v7f32, MVT::v8i32, MVT::v8f32,
370 MVT::v9i32, MVT::v9f32, MVT::v10i32, MVT::v10f32,
371 MVT::v11i32, MVT::v11f32, MVT::v12i32, MVT::v12f32},
372 Custom);
373 setOperationAction(
374 ISD::EXTRACT_SUBVECTOR,
375 {MVT::v2f16, MVT::v2i16, MVT::v4f16, MVT::v4i16, MVT::v2f32,
376 MVT::v2i32, MVT::v3f32, MVT::v3i32, MVT::v4f32, MVT::v4i32,
377 MVT::v5f32, MVT::v5i32, MVT::v6f32, MVT::v6i32, MVT::v7f32,
378 MVT::v7i32, MVT::v8f32, MVT::v8i32, MVT::v9f32, MVT::v9i32,
379 MVT::v10i32, MVT::v10f32, MVT::v11i32, MVT::v11f32, MVT::v12i32,
380 MVT::v12f32, MVT::v16f16, MVT::v16i16, MVT::v16f32, MVT::v16i32,
381 MVT::v32f32, MVT::v32i32, MVT::v2f64, MVT::v2i64, MVT::v3f64,
382 MVT::v3i64, MVT::v4f64, MVT::v4i64, MVT::v8f64, MVT::v8i64,
383 MVT::v16f64, MVT::v16i64},
384 Custom);
386 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
387 setOperationAction(ISD::FP_TO_FP16, {MVT::f64, MVT::f32}, Custom);
389 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
390 for (MVT VT : ScalarIntVTs) {
391 // These should use [SU]DIVREM, so set them to expand
392 setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM}, VT,
393 Expand);
395 // The GPU does not have a divrem instruction for either signed or unsigned operands.
396 setOperationAction({ISD::SDIVREM, ISD::UDIVREM}, VT, Custom);
398 // The GPU cannot perform [S|U]MUL_LOHI in a single instruction.
399 setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);
401 setOperationAction({ISD::BSWAP, ISD::CTTZ, ISD::CTLZ}, VT, Expand);
403 // AMDGPU uses ADDC/SUBC/ADDE/SUBE
404 setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT, Legal);
407 // The hardware supports 32-bit FSHR, but not FSHL.
408 setOperationAction(ISD::FSHR, MVT::i32, Legal);
410 // The hardware supports 32-bit ROTR, but not ROTL.
411 setOperationAction(ISD::ROTL, {MVT::i32, MVT::i64}, Expand);
412 setOperationAction(ISD::ROTR, MVT::i64, Expand);
414 setOperationAction({ISD::MULHU, ISD::MULHS}, MVT::i16, Expand);
416 setOperationAction({ISD::MUL, ISD::MULHU, ISD::MULHS}, MVT::i64, Expand);
417 setOperationAction(
418 {ISD::UINT_TO_FP, ISD::SINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
419 MVT::i64, Custom);
420 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
422 setOperationAction({ISD::SMIN, ISD::UMIN, ISD::SMAX, ISD::UMAX}, MVT::i32,
423 Legal);
425 setOperationAction(
426 {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
427 MVT::i64, Custom);
429 static const MVT::SimpleValueType VectorIntTypes[] = {
430 MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32, MVT::v6i32, MVT::v7i32,
431 MVT::v9i32, MVT::v10i32, MVT::v11i32, MVT::v12i32};
433 for (MVT VT : VectorIntTypes) {
434 // Expand the following operations for the current type by default.
435 setOperationAction({ISD::ADD, ISD::AND, ISD::FP_TO_SINT,
436 ISD::FP_TO_UINT, ISD::MUL, ISD::MULHU,
437 ISD::MULHS, ISD::OR, ISD::SHL,
438 ISD::SRA, ISD::SRL, ISD::ROTL,
439 ISD::ROTR, ISD::SUB, ISD::SINT_TO_FP,
440 ISD::UINT_TO_FP, ISD::SDIV, ISD::UDIV,
441 ISD::SREM, ISD::UREM, ISD::SMUL_LOHI,
442 ISD::UMUL_LOHI, ISD::SDIVREM, ISD::UDIVREM,
443 ISD::SELECT, ISD::VSELECT, ISD::SELECT_CC,
444 ISD::XOR, ISD::BSWAP, ISD::CTPOP,
445 ISD::CTTZ, ISD::CTLZ, ISD::VECTOR_SHUFFLE,
446 ISD::SETCC},
447 VT, Expand);
450 static const MVT::SimpleValueType FloatVectorTypes[] = {
451 MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32, MVT::v6f32, MVT::v7f32,
452 MVT::v9f32, MVT::v10f32, MVT::v11f32, MVT::v12f32};
454 for (MVT VT : FloatVectorTypes) {
455 setOperationAction(
456 {ISD::FABS, ISD::FMINNUM, ISD::FMAXNUM, ISD::FADD,
457 ISD::FCEIL, ISD::FCOS, ISD::FDIV, ISD::FEXP2,
458 ISD::FEXP, ISD::FLOG2, ISD::FREM, ISD::FLOG,
459 ISD::FLOG10, ISD::FPOW, ISD::FFLOOR, ISD::FTRUNC,
460 ISD::FMUL, ISD::FMA, ISD::FRINT, ISD::FNEARBYINT,
461 ISD::FSQRT, ISD::FSIN, ISD::FSUB, ISD::FNEG,
462 ISD::VSELECT, ISD::SELECT_CC, ISD::FCOPYSIGN, ISD::VECTOR_SHUFFLE,
463 ISD::SETCC, ISD::FCANONICALIZE},
464 VT, Expand);
467 // This causes us to use an unrolled select operation rather than expansion with
468 // bit operations. This is generally better, but the BFI-based alternative
469 // may win when the select sources are SGPRs.
470 setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
471 AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);
473 setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
474 AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);
476 setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
477 AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);
479 setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
480 AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);
482 setOperationAction(ISD::SELECT, MVT::v6f32, Promote);
483 AddPromotedToType(ISD::SELECT, MVT::v6f32, MVT::v6i32);
485 setOperationAction(ISD::SELECT, MVT::v7f32, Promote);
486 AddPromotedToType(ISD::SELECT, MVT::v7f32, MVT::v7i32);
488 setOperationAction(ISD::SELECT, MVT::v9f32, Promote);
489 AddPromotedToType(ISD::SELECT, MVT::v9f32, MVT::v9i32);
491 setOperationAction(ISD::SELECT, MVT::v10f32, Promote);
492 AddPromotedToType(ISD::SELECT, MVT::v10f32, MVT::v10i32);
494 setOperationAction(ISD::SELECT, MVT::v11f32, Promote);
495 AddPromotedToType(ISD::SELECT, MVT::v11f32, MVT::v11i32);
497 setOperationAction(ISD::SELECT, MVT::v12f32, Promote);
498 AddPromotedToType(ISD::SELECT, MVT::v12f32, MVT::v12i32);
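// For each promoted lane, the two possible lowerings of select referred to in
// the comment before this block are roughly:
//   unrolled: v_cndmask_b32 dst, y, x, vcc   // per-element conditional move
//   bit ops:  v_bfi_b32 dst, mask, x, y      // dst = (x & mask) | (y & ~mask)
// The BFI form needs the condition materialized as an all-ones/all-zeros mask,
// which is cheaper when the sources are SGPRs.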
500 // There are no libcalls of any kind.
501 for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
502 setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);
504 setSchedulingPreference(Sched::RegPressure);
505 setJumpIsExpensive(true);
507 // FIXME: This is only partially true. If we have to do vector compares, any
508 // SGPR pair can be a condition register. If we have a uniform condition, we
509 // are better off doing SALU operations, where there is only one SCC. For now,
510 // we don't have a way of knowing during instruction selection if a condition
511 // will be uniform and we always use vector compares. Assume we are using
512 // vector compares until that is fixed.
513 setHasMultipleConditionRegisters(true);
515 setMinCmpXchgSizeInBits(32);
516 setSupportsUnalignedAtomics(false);
518 PredictableSelectIsExpensive = false;
520 // We want to find all load dependencies for long chains of stores to enable
521 // merging into very wide vectors. The problem is with vectors with > 4
522 // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
523 // vectors are a legal type, even though we have to split the loads
524 // usually. When we can more precisely specify load legality per address
525 // space, we should be able to make FindBetterChain/MergeConsecutiveStores
526 // smarter so that they can figure out what to do in 2 iterations without all
527 // N > 4 stores on the same chain.
528 GatherAllAliasesMaxDepth = 16;
530 // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
531 // about these during lowering.
532 MaxStoresPerMemcpy = 0xffffffff;
533 MaxStoresPerMemmove = 0xffffffff;
534 MaxStoresPerMemset = 0xffffffff;
536 // The expansion for 64-bit division is enormous.
537 if (AMDGPUBypassSlowDiv)
538 addBypassSlowDiv(64, 32);
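// Conceptually (a sketch, not the actual CodeGenPrepare implementation), the
// bypass wraps a dynamic 64-bit division in a runtime check along the lines of:
//   if (((a | b) >> 32) == 0)                        // both fit in 32 bits
//     q = (uint64_t)((uint32_t)a / (uint32_t)b);     // cheap 32-bit path
//   else
//     q = a / b;                                     // full 64-bit expansion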
540 setTargetDAGCombine({ISD::BITCAST, ISD::SHL,
541 ISD::SRA, ISD::SRL,
542 ISD::TRUNCATE, ISD::MUL,
543 ISD::SMUL_LOHI, ISD::UMUL_LOHI,
544 ISD::MULHU, ISD::MULHS,
545 ISD::SELECT, ISD::SELECT_CC,
546 ISD::STORE, ISD::FADD,
547 ISD::FSUB, ISD::FNEG,
548 ISD::FABS, ISD::AssertZext,
549 ISD::AssertSext, ISD::INTRINSIC_WO_CHAIN});
552 bool AMDGPUTargetLowering::mayIgnoreSignedZero(SDValue Op) const {
553 if (getTargetMachine().Options.NoSignedZerosFPMath)
554 return true;
556 const auto Flags = Op.getNode()->getFlags();
557 if (Flags.hasNoSignedZeros())
558 return true;
560 return false;
563 //===----------------------------------------------------------------------===//
564 // Target Information
565 //===----------------------------------------------------------------------===//
567 LLVM_READNONE
568 static bool fnegFoldsIntoOpcode(unsigned Opc) {
569 switch (Opc) {
570 case ISD::FADD:
571 case ISD::FSUB:
572 case ISD::FMUL:
573 case ISD::FMA:
574 case ISD::FMAD:
575 case ISD::FMINNUM:
576 case ISD::FMAXNUM:
577 case ISD::FMINNUM_IEEE:
578 case ISD::FMAXNUM_IEEE:
579 case ISD::SELECT:
580 case ISD::FSIN:
581 case ISD::FTRUNC:
582 case ISD::FRINT:
583 case ISD::FNEARBYINT:
584 case ISD::FCANONICALIZE:
585 case AMDGPUISD::RCP:
586 case AMDGPUISD::RCP_LEGACY:
587 case AMDGPUISD::RCP_IFLAG:
588 case AMDGPUISD::SIN_HW:
589 case AMDGPUISD::FMUL_LEGACY:
590 case AMDGPUISD::FMIN_LEGACY:
591 case AMDGPUISD::FMAX_LEGACY:
592 case AMDGPUISD::FMED3:
593 // TODO: handle llvm.amdgcn.fma.legacy
594 return true;
595 case ISD::BITCAST:
596 llvm_unreachable("bitcast is special cased");
597 default:
598 return false;
602 static bool fnegFoldsIntoOp(const SDNode *N) {
603 unsigned Opc = N->getOpcode();
604 if (Opc == ISD::BITCAST) {
605 // TODO: Is there a benefit to checking the conditions performFNegCombine
606 // does? We don't for the other cases.
607 SDValue BCSrc = N->getOperand(0);
608 if (BCSrc.getOpcode() == ISD::BUILD_VECTOR) {
609 return BCSrc.getNumOperands() == 2 &&
610 BCSrc.getOperand(1).getValueSizeInBits() == 32;
613 return BCSrc.getOpcode() == ISD::SELECT && BCSrc.getValueType() == MVT::f32;
616 return fnegFoldsIntoOpcode(Opc);
619 /// \returns true if the operation will definitely need to use a 64-bit
620 /// encoding, and thus will use a VOP3 encoding regardless of the source
621 /// modifiers.
622 LLVM_READONLY
623 static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
624 return (N->getNumOperands() > 2 && N->getOpcode() != ISD::SELECT) ||
625 VT == MVT::f64;
628 /// Return true if v_cndmask_b32 will support fabs/fneg source modifiers on the
629 /// given type when used for ISD::SELECT.
630 LLVM_READONLY
631 static bool selectSupportsSourceMods(const SDNode *N) {
632 // TODO: Only applies if select will be vector
633 return N->getValueType(0) == MVT::f32;
636 // Most FP instructions support source modifiers, but this could be refined
637 // slightly.
638 LLVM_READONLY
639 static bool hasSourceMods(const SDNode *N) {
640 if (isa<MemSDNode>(N))
641 return false;
643 switch (N->getOpcode()) {
644 case ISD::CopyToReg:
645 case ISD::FDIV:
646 case ISD::FREM:
647 case ISD::INLINEASM:
648 case ISD::INLINEASM_BR:
649 case AMDGPUISD::DIV_SCALE:
650 case ISD::INTRINSIC_W_CHAIN:
652 // TODO: Should really be looking at the users of the bitcast. These are
653 // problematic because bitcasts are used to legalize all stores to integer
654 // types.
655 case ISD::BITCAST:
656 return false;
657 case ISD::INTRINSIC_WO_CHAIN: {
658 switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
659 case Intrinsic::amdgcn_interp_p1:
660 case Intrinsic::amdgcn_interp_p2:
661 case Intrinsic::amdgcn_interp_mov:
662 case Intrinsic::amdgcn_interp_p1_f16:
663 case Intrinsic::amdgcn_interp_p2_f16:
664 return false;
665 default:
666 return true;
669 case ISD::SELECT:
670 return selectSupportsSourceMods(N);
671 default:
672 return true;
676 bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
677 unsigned CostThreshold) {
678 // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
679 // a source modifier is truly free for them in all cases. If there are multiple
680 // users and the modifier would force each of them into a VOP3 encoding, code
681 // size will increase. Try to avoid increasing code size unless we know it
682 // will save on the instruction count.
683 unsigned NumMayIncreaseSize = 0;
684 MVT VT = N->getValueType(0).getScalarType().getSimpleVT();
686 assert(!N->use_empty());
688 // XXX - Should this limit number of uses to check?
689 for (const SDNode *U : N->uses()) {
690 if (!hasSourceMods(U))
691 return false;
693 if (!opMustUseVOP3Encoding(U, VT)) {
694 if (++NumMayIncreaseSize > CostThreshold)
695 return false;
699 return true;
702 EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
703 ISD::NodeType ExtendKind) const {
704 assert(!VT.isVector() && "only scalar expected");
706 // Round to the next multiple of 32-bits.
707 unsigned Size = VT.getSizeInBits();
708 if (Size <= 32)
709 return MVT::i32;
710 return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
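// For example, an i1 or i16 return value is widened to i32, and an i48 return
// value is widened to i64, the next multiple of 32 bits.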
713 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
714 return MVT::i32;
717 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
718 return true;
721 // The backend supports 32- and 64-bit floating-point immediates.
722 // FIXME: Why are we reporting vectors of FP immediates as legal?
723 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
724 bool ForCodeSize) const {
725 EVT ScalarVT = VT.getScalarType();
726 return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
727 (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
730 // We don't want to shrink f64 / f32 constants.
731 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
732 EVT ScalarVT = VT.getScalarType();
733 return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
736 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
737 ISD::LoadExtType ExtTy,
738 EVT NewVT) const {
739 // TODO: This may be worth removing. Check regression tests for diffs.
740 if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
741 return false;
743 unsigned NewSize = NewVT.getStoreSizeInBits();
745 // If we are reducing to a 32-bit load or a smaller multi-dword load,
746 // this is always better.
747 if (NewSize >= 32)
748 return true;
750 EVT OldVT = N->getValueType(0);
751 unsigned OldSize = OldVT.getStoreSizeInBits();
753 MemSDNode *MN = cast<MemSDNode>(N);
754 unsigned AS = MN->getAddressSpace();
755 // Do not shrink an aligned scalar load to sub-dword.
756 // Scalar engine cannot do sub-dword loads.
757 if (OldSize >= 32 && NewSize < 32 && MN->getAlign() >= Align(4) &&
758 (AS == AMDGPUAS::CONSTANT_ADDRESS ||
759 AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
760 (isa<LoadSDNode>(N) && AS == AMDGPUAS::GLOBAL_ADDRESS &&
761 MN->isInvariant())) &&
762 AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
763 return false;
765 // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
766 // extloads, so doing one requires using a buffer_load. In cases where we
767 // still couldn't use a scalar load, using the wider load shouldn't really
768 // hurt anything.
770 // If the old load already had to be an extload, there's no harm in continuing
771 // to reduce the width.
772 return (OldSize < 32);
775 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
776 const SelectionDAG &DAG,
777 const MachineMemOperand &MMO) const {
779 assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());
781 if (LoadTy.getScalarType() == MVT::i32)
782 return false;
784 unsigned LScalarSize = LoadTy.getScalarSizeInBits();
785 unsigned CastScalarSize = CastTy.getScalarSizeInBits();
787 if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
788 return false;
790 unsigned Fast = 0;
791 return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
792 CastTy, MMO, &Fast) &&
793 Fast;
796 // SI+ has cttz / ctlz instructions for 32-bit values. This is probably also
797 // profitable with the expansion for 64-bit, since it's generally good to
798 // speculate things.
799 bool AMDGPUTargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
800 return true;
803 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
804 return true;
807 bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
808 switch (N->getOpcode()) {
809 case ISD::EntryToken:
810 case ISD::TokenFactor:
811 return true;
812 case ISD::INTRINSIC_WO_CHAIN: {
813 unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
814 switch (IntrID) {
815 case Intrinsic::amdgcn_readfirstlane:
816 case Intrinsic::amdgcn_readlane:
817 return true;
819 return false;
821 case ISD::LOAD:
822 if (cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
823 AMDGPUAS::CONSTANT_ADDRESS_32BIT)
824 return true;
825 return false;
826 case AMDGPUISD::SETCC: // ballot-style instruction
827 return true;
829 return false;
832 SDValue AMDGPUTargetLowering::getNegatedExpression(
833 SDValue Op, SelectionDAG &DAG, bool LegalOperations, bool ForCodeSize,
834 NegatibleCost &Cost, unsigned Depth) const {
836 switch (Op.getOpcode()) {
837 case ISD::FMA:
838 case ISD::FMAD: {
839 // Negating a fma is not free if it has users without source mods.
840 if (!allUsesHaveSourceMods(Op.getNode()))
841 return SDValue();
842 break;
844 case AMDGPUISD::RCP: {
845 SDValue Src = Op.getOperand(0);
846 EVT VT = Op.getValueType();
847 SDLoc SL(Op);
849 SDValue NegSrc = getNegatedExpression(Src, DAG, LegalOperations,
850 ForCodeSize, Cost, Depth + 1);
851 if (NegSrc)
852 return DAG.getNode(AMDGPUISD::RCP, SL, VT, NegSrc, Op->getFlags());
853 return SDValue();
855 default:
856 break;
859 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
860 ForCodeSize, Cost, Depth);
863 //===---------------------------------------------------------------------===//
864 // Target Properties
865 //===---------------------------------------------------------------------===//
867 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
868 assert(VT.isFloatingPoint());
870 // Packed operations do not have a fabs modifier.
871 return VT == MVT::f32 || VT == MVT::f64 ||
872 (Subtarget->has16BitInsts() && VT == MVT::f16);
875 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
876 assert(VT.isFloatingPoint());
877 // Report this based on the end legalized type.
878 VT = VT.getScalarType();
879 return VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f16;
882 bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
883 unsigned NumElem,
884 unsigned AS) const {
885 return true;
888 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
889 // There are few operations which truly have vector input operands. Any vector
890 // operation is going to involve operations on each component, and a
891 // build_vector will be a copy per element, so it always makes sense to use a
892 // build_vector input in place of the extracted element to avoid a copy into a
893 // super register.
895 // We should probably only do this if all users are extracts only, but this
896 // should be the common case.
897 return true;
900 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
901 // Truncate is just accessing a subregister.
903 unsigned SrcSize = Source.getSizeInBits();
904 unsigned DestSize = Dest.getSizeInBits();
906 return DestSize < SrcSize && DestSize % 32 == 0;
909 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
910 // Truncate is just accessing a subregister.
912 unsigned SrcSize = Source->getScalarSizeInBits();
913 unsigned DestSize = Dest->getScalarSizeInBits();
915 if (DestSize == 16 && Subtarget->has16BitInsts())
916 return SrcSize >= 32;
918 return DestSize < SrcSize && DestSize % 32 == 0;
921 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
922 unsigned SrcSize = Src->getScalarSizeInBits();
923 unsigned DestSize = Dest->getScalarSizeInBits();
925 if (SrcSize == 16 && Subtarget->has16BitInsts())
926 return DestSize >= 32;
928 return SrcSize == 32 && DestSize == 64;
931 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
932 // Any register load of a 64-bit value really requires 2 32-bit moves. For all
933 // practical purposes, the extra mov 0 needed to form a 64-bit value is free.
934 // As used, this will enable reducing 64-bit operations to 32-bit, which is
935 // always good.
937 if (Src == MVT::i16)
938 return Dest == MVT::i32 || Dest == MVT::i64;
940 return Src == MVT::i32 && Dest == MVT::i64;
943 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
944 // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
945 // limited number of native 64-bit operations. Shrinking an operation to fit
946 // in a single 32-bit register should always be helpful. As currently used,
947 // this is much less general than the name suggests, and is only used in
948 // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
949 // not profitable, and may actually be harmful.
950 return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
953 bool AMDGPUTargetLowering::isDesirableToCommuteWithShift(
954 const SDNode* N, CombineLevel Level) const {
955 assert((N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRA ||
956 N->getOpcode() == ISD::SRL) &&
957 "Expected shift op");
958 // Always commute pre-type legalization and right shifts.
959 // We're looking for shl(or(x,y),z) patterns.
960 if (Level < CombineLevel::AfterLegalizeTypes ||
961 N->getOpcode() != ISD::SHL || N->getOperand(0).getOpcode() != ISD::OR)
962 return true;
964 // If the only user is an i32 right-shift, then don't destroy a BFE pattern.
965 if (N->getValueType(0) == MVT::i32 && N->use_size() == 1 &&
966 (N->use_begin()->getOpcode() == ISD::SRA ||
967 N->use_begin()->getOpcode() == ISD::SRL))
968 return false;
970 // Don't destroy or(shl(load_zext(),c), load_zext()) patterns.
971 auto IsShiftAndLoad = [](SDValue LHS, SDValue RHS) {
972 if (LHS.getOpcode() != ISD::SHL)
973 return false;
974 auto *RHSLd = dyn_cast<LoadSDNode>(RHS);
975 auto *LHS0 = dyn_cast<LoadSDNode>(LHS.getOperand(0));
976 auto *LHS1 = dyn_cast<ConstantSDNode>(LHS.getOperand(1));
977 return LHS0 && LHS1 && RHSLd && LHS0->getExtensionType() == ISD::ZEXTLOAD &&
978 LHS1->getAPIntValue() == LHS0->getMemoryVT().getScalarSizeInBits() &&
979 RHSLd->getExtensionType() == ISD::ZEXTLOAD;
981 SDValue LHS = N->getOperand(0).getOperand(0);
982 SDValue RHS = N->getOperand(0).getOperand(1);
983 return !(IsShiftAndLoad(LHS, RHS) || IsShiftAndLoad(RHS, LHS));
986 //===---------------------------------------------------------------------===//
987 // TargetLowering Callbacks
988 //===---------------------------------------------------------------------===//
990 CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
991 bool IsVarArg) {
992 switch (CC) {
993 case CallingConv::AMDGPU_VS:
994 case CallingConv::AMDGPU_GS:
995 case CallingConv::AMDGPU_PS:
996 case CallingConv::AMDGPU_CS:
997 case CallingConv::AMDGPU_HS:
998 case CallingConv::AMDGPU_ES:
999 case CallingConv::AMDGPU_LS:
1000 return CC_AMDGPU;
1001 case CallingConv::C:
1002 case CallingConv::Fast:
1003 case CallingConv::Cold:
1004 return CC_AMDGPU_Func;
1005 case CallingConv::AMDGPU_Gfx:
1006 return CC_SI_Gfx;
1007 case CallingConv::AMDGPU_KERNEL:
1008 case CallingConv::SPIR_KERNEL:
1009 default:
1010 report_fatal_error("Unsupported calling convention for call");
1014 CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
1015 bool IsVarArg) {
1016 switch (CC) {
1017 case CallingConv::AMDGPU_KERNEL:
1018 case CallingConv::SPIR_KERNEL:
1019 llvm_unreachable("kernels should not be handled here");
1020 case CallingConv::AMDGPU_VS:
1021 case CallingConv::AMDGPU_GS:
1022 case CallingConv::AMDGPU_PS:
1023 case CallingConv::AMDGPU_CS:
1024 case CallingConv::AMDGPU_HS:
1025 case CallingConv::AMDGPU_ES:
1026 case CallingConv::AMDGPU_LS:
1027 return RetCC_SI_Shader;
1028 case CallingConv::AMDGPU_Gfx:
1029 return RetCC_SI_Gfx;
1030 case CallingConv::C:
1031 case CallingConv::Fast:
1032 case CallingConv::Cold:
1033 return RetCC_AMDGPU_Func;
1034 default:
1035 report_fatal_error("Unsupported calling convention.");
1039 /// The SelectionDAGBuilder will automatically promote function arguments
1040 /// with illegal types. However, this does not work for the AMDGPU targets
1041 /// since the function arguments are stored in memory as these illegal types.
1042 /// In order to handle this properly we need to get the original type sizes
1043 /// from the LLVM IR Function and fix up the ISD::InputArg values before
1044 /// passing them to AnalyzeFormalArguments().
1046 /// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
1047 /// input values across multiple registers. Each item in the Ins array
1048 /// represents a single value that will be stored in registers. Ins[x].VT is
1049 /// the value type of the value that will be stored in the register, so
1050 /// whatever SDNode we lower the argument to needs to be this type.
1052 /// In order to correctly lower the arguments we need to know the size of each
1053 /// argument. Since Ins[x].VT gives us the size of the register that will
1054 /// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
1055 /// for the original function argument so that we can deduce the correct memory
1056 /// type to use for Ins[x]. In most cases the correct memory type will be
1057 /// Ins[x].ArgVT. However, this will not always be the case. If, for example,
1058 /// we have a kernel argument of type v8i8, this argument will be split into
1059 /// 8 parts and each part will be represented by its own item in the Ins array.
1060 /// For each part, Ins[x].ArgVT will be v8i8, which is the full type of
1061 /// the argument before it was split. From this, we deduce that the memory type
1062 /// for each individual part is i8. We pass the memory type as LocVT to the
1063 /// calling convention analysis function and the register type (Ins[x].VT) as
1064 /// the ValVT.
1065 void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
1066 CCState &State,
1067 const SmallVectorImpl<ISD::InputArg> &Ins) const {
1068 const MachineFunction &MF = State.getMachineFunction();
1069 const Function &Fn = MF.getFunction();
1070 LLVMContext &Ctx = Fn.getParent()->getContext();
1071 const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
1072 const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset();
1073 CallingConv::ID CC = Fn.getCallingConv();
1075 Align MaxAlign = Align(1);
1076 uint64_t ExplicitArgOffset = 0;
1077 const DataLayout &DL = Fn.getParent()->getDataLayout();
1079 unsigned InIndex = 0;
1081 for (const Argument &Arg : Fn.args()) {
1082 const bool IsByRef = Arg.hasByRefAttr();
1083 Type *BaseArgTy = Arg.getType();
1084 Type *MemArgTy = IsByRef ? Arg.getParamByRefType() : BaseArgTy;
1085 Align Alignment = DL.getValueOrABITypeAlignment(
1086 IsByRef ? Arg.getParamAlign() : std::nullopt, MemArgTy);
1087 MaxAlign = std::max(Alignment, MaxAlign);
1088 uint64_t AllocSize = DL.getTypeAllocSize(MemArgTy);
1090 uint64_t ArgOffset = alignTo(ExplicitArgOffset, Alignment) + ExplicitOffset;
1091 ExplicitArgOffset = alignTo(ExplicitArgOffset, Alignment) + AllocSize;
1093 // We're basically throwing away everything passed into us and starting over
1094 // to get accurate in-memory offsets. The "PartOffset" is completely useless
1095 // to us as computed in Ins.
1097 // We also need to figure out what type legalization is trying to do to get
1098 // the correct memory offsets.
1100 SmallVector<EVT, 16> ValueVTs;
1101 SmallVector<uint64_t, 16> Offsets;
1102 ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);
1104 for (unsigned Value = 0, NumValues = ValueVTs.size();
1105 Value != NumValues; ++Value) {
1106 uint64_t BasePartOffset = Offsets[Value];
1108 EVT ArgVT = ValueVTs[Value];
1109 EVT MemVT = ArgVT;
1110 MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
1111 unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);
1113 if (NumRegs == 1) {
1114 // This argument is not split, so the IR type is the memory type.
1115 if (ArgVT.isExtended()) {
1116 // We have an extended type, like i24, so we should just use the
1117 // register type.
1118 MemVT = RegisterVT;
1119 } else {
1120 MemVT = ArgVT;
1122 } else if (ArgVT.isVector() && RegisterVT.isVector() &&
1123 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
1124 assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
1125 // We have a vector value which has been split into a vector with
1126 // the same scalar type, but fewer elements. This should handle
1127 // all the floating-point vector types.
1128 MemVT = RegisterVT;
1129 } else if (ArgVT.isVector() &&
1130 ArgVT.getVectorNumElements() == NumRegs) {
1131 // This arg has been split so that each element is stored in a separate
1132 // register.
1133 MemVT = ArgVT.getScalarType();
1134 } else if (ArgVT.isExtended()) {
1135 // We have an extended type, like i65.
1136 MemVT = RegisterVT;
1137 } else {
1138 unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
1139 assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
1140 if (RegisterVT.isInteger()) {
1141 MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
1142 } else if (RegisterVT.isVector()) {
1143 assert(!RegisterVT.getScalarType().isFloatingPoint());
1144 unsigned NumElements = RegisterVT.getVectorNumElements();
1145 assert(MemoryBits % NumElements == 0);
1146 // This vector type has been split into another vector type with
1147 // a different element size.
1148 EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
1149 MemoryBits / NumElements);
1150 MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
1151 } else {
1152 llvm_unreachable("cannot deduce memory type.");
1156 // Convert one element vectors to scalar.
1157 if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
1158 MemVT = MemVT.getScalarType();
1160 // Round up vec3/vec5 argument.
1161 if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
1162 assert(MemVT.getVectorNumElements() == 3 ||
1163 MemVT.getVectorNumElements() == 5 ||
1164 (MemVT.getVectorNumElements() >= 9 &&
1165 MemVT.getVectorNumElements() <= 12));
1166 MemVT = MemVT.getPow2VectorType(State.getContext());
1167 } else if (!MemVT.isSimple() && !MemVT.isVector()) {
1168 MemVT = MemVT.getRoundIntegerType(State.getContext());
1171 unsigned PartOffset = 0;
1172 for (unsigned i = 0; i != NumRegs; ++i) {
1173 State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
1174 BasePartOffset + PartOffset,
1175 MemVT.getSimpleVT(),
1176 CCValAssign::Full));
1177 PartOffset += MemVT.getStoreSize();
1183 SDValue AMDGPUTargetLowering::LowerReturn(
1184 SDValue Chain, CallingConv::ID CallConv,
1185 bool isVarArg,
1186 const SmallVectorImpl<ISD::OutputArg> &Outs,
1187 const SmallVectorImpl<SDValue> &OutVals,
1188 const SDLoc &DL, SelectionDAG &DAG) const {
1189 // FIXME: Fails for r600 tests
1190 //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
1191 // "wave terminate should not have return values");
1192 return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
1195 //===---------------------------------------------------------------------===//
1196 // Target specific lowering
1197 //===---------------------------------------------------------------------===//
1199 /// Selects the correct CCAssignFn for a given CallingConvention value.
1200 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1201 bool IsVarArg) {
1202 return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
1205 CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1206 bool IsVarArg) {
1207 return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
1210 SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
1211 SelectionDAG &DAG,
1212 MachineFrameInfo &MFI,
1213 int ClobberedFI) const {
1214 SmallVector<SDValue, 8> ArgChains;
1215 int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
1216 int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;
1218 // Include the original chain at the beginning of the list. When this is
1219 // used by target LowerCall hooks, this helps legalize find the
1220 // CALLSEQ_BEGIN node.
1221 ArgChains.push_back(Chain);
1223 // Add a chain value for each stack argument load that corresponds to the
1224 // clobbered object.
1224 for (SDNode *U : DAG.getEntryNode().getNode()->uses()) {
1225 if (LoadSDNode *L = dyn_cast<LoadSDNode>(U)) {
1226 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
1227 if (FI->getIndex() < 0) {
1228 int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
1229 int64_t InLastByte = InFirstByte;
1230 InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;
1232 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1233 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1234 ArgChains.push_back(SDValue(L, 1));
1240 // Build a tokenfactor for all the chains.
1241 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
1244 SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
1245 SmallVectorImpl<SDValue> &InVals,
1246 StringRef Reason) const {
1247 SDValue Callee = CLI.Callee;
1248 SelectionDAG &DAG = CLI.DAG;
1250 const Function &Fn = DAG.getMachineFunction().getFunction();
1252 StringRef FuncName("<unknown>");
1254 if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
1255 FuncName = G->getSymbol();
1256 else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1257 FuncName = G->getGlobal()->getName();
1259 DiagnosticInfoUnsupported NoCalls(
1260 Fn, Reason + FuncName, CLI.DL.getDebugLoc());
1261 DAG.getContext()->diagnose(NoCalls);
1263 if (!CLI.IsTailCall) {
1264 for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
1265 InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
1268 return DAG.getEntryNode();
1271 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
1272 SmallVectorImpl<SDValue> &InVals) const {
1273 return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
1276 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
1277 SelectionDAG &DAG) const {
1278 const Function &Fn = DAG.getMachineFunction().getFunction();
1280 DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
1281 SDLoc(Op).getDebugLoc());
1282 DAG.getContext()->diagnose(NoDynamicAlloca);
1283 auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
1284 return DAG.getMergeValues(Ops, SDLoc());
1287 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
1288 SelectionDAG &DAG) const {
1289 switch (Op.getOpcode()) {
1290 default:
1291 Op->print(errs(), &DAG);
1292 llvm_unreachable("Custom lowering code for this "
1293 "instruction is not implemented yet!");
1294 break;
1295 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
1296 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
1297 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
1298 case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
1299 case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
1300 case ISD::FREM: return LowerFREM(Op, DAG);
1301 case ISD::FCEIL: return LowerFCEIL(Op, DAG);
1302 case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
1303 case ISD::FRINT: return LowerFRINT(Op, DAG);
1304 case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
1305 case ISD::FROUNDEVEN:
1306 return LowerFROUNDEVEN(Op, DAG);
1307 case ISD::FROUND: return LowerFROUND(Op, DAG);
1308 case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
1309 case ISD::FLOG2:
1310 return LowerFLOG2(Op, DAG);
1311 case ISD::FLOG:
1312 return LowerFLOG(Op, DAG, numbers::ln2);
1313 case ISD::FLOG10:
1314 return LowerFLOG(Op, DAG, numbers::ln2 / numbers::ln10);
1315 case ISD::FEXP:
1316 return lowerFEXP(Op, DAG);
1317 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
1318 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
1319 case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
1320 case ISD::FP_TO_SINT:
1321 case ISD::FP_TO_UINT:
1322 return LowerFP_TO_INT(Op, DAG);
1323 case ISD::CTTZ:
1324 case ISD::CTTZ_ZERO_UNDEF:
1325 case ISD::CTLZ:
1326 case ISD::CTLZ_ZERO_UNDEF:
1327 return LowerCTLZ_CTTZ(Op, DAG);
1328 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1330 return Op;
1333 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
1334 SmallVectorImpl<SDValue> &Results,
1335 SelectionDAG &DAG) const {
1336 switch (N->getOpcode()) {
1337 case ISD::SIGN_EXTEND_INREG:
1338 // Different parts of legalization seem to interpret which type of
1339 // sign_extend_inreg is the one to check for custom lowering. The extended
1340 // from type is what really matters, but some places check for custom
1341 // lowering of the result type. This results in trying to use
1342 // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
1343 // nothing here and let the illegal result integer be handled normally.
1344 return;
1345 case ISD::FLOG2:
1346 if (SDValue Lowered = LowerFLOG2(SDValue(N, 0), DAG))
1347 Results.push_back(Lowered);
1348 return;
1349 default:
1350 return;
1354 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
1355 SDValue Op,
1356 SelectionDAG &DAG) const {
1358 const DataLayout &DL = DAG.getDataLayout();
1359 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
1360 const GlobalValue *GV = G->getGlobal();
1362 if (!MFI->isModuleEntryFunction()) {
1363 if (std::optional<uint32_t> Address =
1364 AMDGPUMachineFunction::getLDSAbsoluteAddress(*GV)) {
1365 return DAG.getConstant(*Address, SDLoc(Op), Op.getValueType());
1369 if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
1370 G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
1371 if (!MFI->isModuleEntryFunction() &&
1372 !GV->getName().equals("llvm.amdgcn.module.lds")) {
1373 SDLoc DL(Op);
1374 const Function &Fn = DAG.getMachineFunction().getFunction();
1375 DiagnosticInfoUnsupported BadLDSDecl(
1376 Fn, "local memory global used by non-kernel function",
1377 DL.getDebugLoc(), DS_Warning);
1378 DAG.getContext()->diagnose(BadLDSDecl);
1380 // We currently don't have a way to correctly allocate LDS objects that
1381 // aren't directly associated with a kernel. We do force inlining of
1382 // functions that use local objects. However, if these dead functions are
1383 // not eliminated, we don't want a compile time error. Just emit a warning
1384 // and a trap, since there should be no callable path here.
1385 SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
1386 SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1387 Trap, DAG.getRoot());
1388 DAG.setRoot(OutputChain);
1389 return DAG.getUNDEF(Op.getValueType());
1392 // XXX: What does the value of G->getOffset() mean?
1393 assert(G->getOffset() == 0 &&
1394 "Do not know what to do with an non-zero offset");
1396 // TODO: We could emit code to handle the initialization somewhere.
1397 // We ignore the initializer for now and legalize it to allow selection.
1398 // The initializer will be rejected during assembly emission anyway.
1399 unsigned Offset = MFI->allocateLDSGlobal(DL, *cast<GlobalVariable>(GV));
1400 return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
1402 return SDValue();
1405 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
1406 SelectionDAG &DAG) const {
1407 SmallVector<SDValue, 8> Args;
1408 SDLoc SL(Op);
1410 EVT VT = Op.getValueType();
1411 if (VT.getVectorElementType().getSizeInBits() < 32) {
1412 unsigned OpBitSize = Op.getOperand(0).getValueType().getSizeInBits();
1413 if (OpBitSize >= 32 && OpBitSize % 32 == 0) {
1414 unsigned NewNumElt = OpBitSize / 32;
1415 EVT NewEltVT = (NewNumElt == 1) ? MVT::i32
1416 : EVT::getVectorVT(*DAG.getContext(),
1417 MVT::i32, NewNumElt);
1418 for (const SDUse &U : Op->ops()) {
1419 SDValue In = U.get();
1420 SDValue NewIn = DAG.getNode(ISD::BITCAST, SL, NewEltVT, In);
1421 if (NewNumElt > 1)
1422 DAG.ExtractVectorElements(NewIn, Args);
1423 else
1424 Args.push_back(NewIn);
1427 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
1428 NewNumElt * Op.getNumOperands());
1429 SDValue BV = DAG.getBuildVector(NewVT, SL, Args);
1430 return DAG.getNode(ISD::BITCAST, SL, VT, BV);
1434 for (const SDUse &U : Op->ops())
1435 DAG.ExtractVectorElements(U.get(), Args);
1437 return DAG.getBuildVector(Op.getValueType(), SL, Args);
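// For example, (concat_vectors (v2f16 a), (v2f16 b)) producing v4f16 takes the
// sub-dword path above: each 32-bit operand is bitcast to i32, the pieces are
// collected into a v2i32 build_vector, and the result is bitcast back to v4f16.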
1440 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
1441 SelectionDAG &DAG) const {
1442 SDLoc SL(Op);
1443 SmallVector<SDValue, 8> Args;
1444 unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1445 EVT VT = Op.getValueType();
1446 EVT SrcVT = Op.getOperand(0).getValueType();
1448 if (VT.getScalarSizeInBits() == 16 && Start % 2 == 0) {
1449 unsigned NumElt = VT.getVectorNumElements();
1450 unsigned NumSrcElt = SrcVT.getVectorNumElements();
1451 assert(NumElt % 2 == 0 && NumSrcElt % 2 == 0 && "expect legal types");
1453 // Extract 32-bit registers at a time.
1454 EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumSrcElt / 2);
1455 EVT NewVT = NumElt == 2
1456 ? MVT::i32
1457 : EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElt / 2);
1458 SDValue Tmp = DAG.getNode(ISD::BITCAST, SL, NewSrcVT, Op.getOperand(0));
1460 DAG.ExtractVectorElements(Tmp, Args, Start / 2, NumElt / 2);
1461 if (NumElt == 2)
1462 Tmp = Args[0];
1463 else
1464 Tmp = DAG.getBuildVector(NewVT, SL, Args);
1466 return DAG.getNode(ISD::BITCAST, SL, VT, Tmp);
1469 DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
1470 VT.getVectorNumElements());
1472 return DAG.getBuildVector(Op.getValueType(), SL, Args);
1475 // TODO: Handle fabs too
1476 static SDValue peekFNeg(SDValue Val) {
1477 if (Val.getOpcode() == ISD::FNEG)
1478 return Val.getOperand(0);
1480 return Val;
1482 SDValue AMDGPUTargetLowering::combineFMinMaxLegacyImpl(
1483 const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, SDValue True,
1484 SDValue False, SDValue CC, DAGCombinerInfo &DCI) const {
1485 SelectionDAG &DAG = DCI.DAG;
1486 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
1487 switch (CCOpcode) {
1488 case ISD::SETOEQ:
1489 case ISD::SETONE:
1490 case ISD::SETUNE:
1491 case ISD::SETNE:
1492 case ISD::SETUEQ:
1493 case ISD::SETEQ:
1494 case ISD::SETFALSE:
1495 case ISD::SETFALSE2:
1496 case ISD::SETTRUE:
1497 case ISD::SETTRUE2:
1498 case ISD::SETUO:
1499 case ISD::SETO:
1500 break;
1501 case ISD::SETULE:
1502 case ISD::SETULT: {
1503 if (LHS == True)
1504 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1505 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1507 case ISD::SETOLE:
1508 case ISD::SETOLT:
1509 case ISD::SETLE:
1510 case ISD::SETLT: {
1511 // Ordered. Assume ordered for undefined.
1513 // Only do this after legalization to avoid interfering with other combines
1514 // which might occur.
1515 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1516 !DCI.isCalledByLegalizer())
1517 return SDValue();
1519 // We need to permute the operands to get the correct NaN behavior. The
1520 // selected operand is the second one based on the failing compare with NaN,
1521 // so permute it based on the compare type the hardware uses.
1522 if (LHS == True)
1523 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1524 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1526 case ISD::SETUGE:
1527 case ISD::SETUGT: {
1528 if (LHS == True)
1529 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
1530 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
1532 case ISD::SETGT:
1533 case ISD::SETGE:
1534 case ISD::SETOGE:
1535 case ISD::SETOGT: {
1536 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
1537 !DCI.isCalledByLegalizer())
1538 return SDValue();
1540 if (LHS == True)
1541 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
1542 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
1544 case ISD::SETCC_INVALID:
1545 llvm_unreachable("Invalid setcc condcode!");
1547 return SDValue();
1550 /// Generate Min/Max node
1551 SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
1552 SDValue LHS, SDValue RHS,
1553 SDValue True, SDValue False,
1554 SDValue CC,
1555 DAGCombinerInfo &DCI) const {
1556 if ((LHS == True && RHS == False) || (LHS == False && RHS == True))
1557 return combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, True, False, CC, DCI);
1559 SelectionDAG &DAG = DCI.DAG;
1561 // If we can't directly match this, try to see if we can fold an fneg to
1562 // match.
1564 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
1565 ConstantFPSDNode *CFalse = dyn_cast<ConstantFPSDNode>(False);
1566 SDValue NegTrue = peekFNeg(True);
1568 // Undo the combine foldFreeOpFromSelect does if it helps us match the
1569 // fmin/fmax.
1571 // select (fcmp olt (lhs, K)), (fneg lhs), -K
1572 // -> fneg (fmin_legacy lhs, K)
1574 // TODO: Use getNegatedExpression
1575 if (LHS == NegTrue && CFalse && CRHS) {
1576 APFloat NegRHS = neg(CRHS->getValueAPF());
1577 if (NegRHS == CFalse->getValueAPF()) {
1578 SDValue Combined =
1579 combineFMinMaxLegacyImpl(DL, VT, LHS, RHS, NegTrue, False, CC, DCI);
1580 if (Combined)
1581 return DAG.getNode(ISD::FNEG, DL, VT, Combined);
1582 return SDValue();
1586 return SDValue();
1589 std::pair<SDValue, SDValue>
1590 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1591 SDLoc SL(Op);
1593 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1595 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1596 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1598 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1599 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1601 return std::pair(Lo, Hi);
1604 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1605 SDLoc SL(Op);
1607 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1608 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1609 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1612 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1613 SDLoc SL(Op);
1615 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1616 const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1617 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1620 // Split a vector type into two parts. The first part is a power-of-two vector.
1621 // The second part is whatever is left over, and is a scalar if it would
1622 // otherwise be a 1-element vector.
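// For example, a v7i16 splits into v4i16 and v3i16, while a v3i32 splits into
// v2i32 and a scalar i32.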
1623 std::pair<EVT, EVT>
1624 AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
1625 EVT LoVT, HiVT;
1626 EVT EltVT = VT.getVectorElementType();
1627 unsigned NumElts = VT.getVectorNumElements();
1628 unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
1629 LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
1630 HiVT = NumElts - LoNumElts == 1
1631 ? EltVT
1632 : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
1633 return std::pair(LoVT, HiVT);
1636 // Split a vector value into two parts of types LoVT and HiVT. HiVT could be
1637 // scalar.
1638 std::pair<SDValue, SDValue>
1639 AMDGPUTargetLowering::splitVector(const SDValue &N, const SDLoc &DL,
1640 const EVT &LoVT, const EVT &HiVT,
1641 SelectionDAG &DAG) const {
1642 assert(LoVT.getVectorNumElements() +
1643 (HiVT.isVector() ? HiVT.getVectorNumElements() : 1) <=
1644 N.getValueType().getVectorNumElements() &&
1645 "More vector elements requested than available!");
1646 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, N,
1647 DAG.getVectorIdxConstant(0, DL));
1648 SDValue Hi = DAG.getNode(
1649 HiVT.isVector() ? ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL,
1650 HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL));
1651 return std::pair(Lo, Hi);
1654 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1655 SelectionDAG &DAG) const {
1656 LoadSDNode *Load = cast<LoadSDNode>(Op);
1657 EVT VT = Op.getValueType();
1658 SDLoc SL(Op);
1661 // If this is a 2-element vector, we really want to scalarize and not create
1662 // weird 1-element vectors.
1663 if (VT.getVectorNumElements() == 2) {
1664 SDValue Ops[2];
1665 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
1666 return DAG.getMergeValues(Ops, SL);
1669 SDValue BasePtr = Load->getBasePtr();
1670 EVT MemVT = Load->getMemoryVT();
1672 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1674 EVT LoVT, HiVT;
1675 EVT LoMemVT, HiMemVT;
1676 SDValue Lo, Hi;
1678 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1679 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1680 std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG);
1682 unsigned Size = LoMemVT.getStoreSize();
1683 Align BaseAlign = Load->getAlign();
1684 Align HiAlign = commonAlignment(BaseAlign, Size);
1686 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1687 Load->getChain(), BasePtr, SrcValue, LoMemVT,
1688 BaseAlign, Load->getMemOperand()->getFlags());
1689 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, TypeSize::Fixed(Size));
1690 SDValue HiLoad =
1691 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
1692 HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1693 HiMemVT, HiAlign, Load->getMemOperand()->getFlags());
1695 SDValue Join;
1696 if (LoVT == HiVT) {
1697 // This is the case where the element count is a power of two, so the split was even.
1698 Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad);
1699 } else {
1700 Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad,
1701 DAG.getVectorIdxConstant(0, SL));
1702 Join = DAG.getNode(
1703 HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL,
1704 VT, Join, HiLoad,
1705 DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL));
1708 SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1709 LoLoad.getValue(1), HiLoad.getValue(1))};
1711 return DAG.getMergeValues(Ops, SL);
1714 SDValue AMDGPUTargetLowering::WidenOrSplitVectorLoad(SDValue Op,
1715 SelectionDAG &DAG) const {
1716 LoadSDNode *Load = cast<LoadSDNode>(Op);
1717 EVT VT = Op.getValueType();
1718 SDValue BasePtr = Load->getBasePtr();
1719 EVT MemVT = Load->getMemoryVT();
1720 SDLoc SL(Op);
1721 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1722 Align BaseAlign = Load->getAlign();
1723 unsigned NumElements = MemVT.getVectorNumElements();
1725 // Widen from vec3 to vec4 when the load is at least 8-byte aligned
1726 // or 16-byte fully dereferenceable. Otherwise, split the vector load.
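// For example, a sufficiently aligned v3i32 load is widened to a v4i32 load and
// the low three elements are extracted back out with EXTRACT_SUBVECTOR.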
1727 if (NumElements != 3 ||
1728 (BaseAlign < Align(8) &&
1729 !SrcValue.isDereferenceable(16, *DAG.getContext(), DAG.getDataLayout())))
1730 return SplitVectorLoad(Op, DAG);
1732 assert(NumElements == 3);
1734 EVT WideVT =
1735 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4);
1736 EVT WideMemVT =
1737 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4);
1738 SDValue WideLoad = DAG.getExtLoad(
1739 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue,
1740 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags());
1741 return DAG.getMergeValues(
1742 {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad,
1743 DAG.getVectorIdxConstant(0, SL)),
1744 WideLoad.getValue(1)},
1745 SL);
1748 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1749 SelectionDAG &DAG) const {
1750 StoreSDNode *Store = cast<StoreSDNode>(Op);
1751 SDValue Val = Store->getValue();
1752 EVT VT = Val.getValueType();
1754 // If this is a 2-element vector, we really want to scalarize and not create
1755 // weird 1-element vectors.
1756 if (VT.getVectorNumElements() == 2)
1757 return scalarizeVectorStore(Store, DAG);
1759 EVT MemVT = Store->getMemoryVT();
1760 SDValue Chain = Store->getChain();
1761 SDValue BasePtr = Store->getBasePtr();
1762 SDLoc SL(Op);
1764 EVT LoVT, HiVT;
1765 EVT LoMemVT, HiMemVT;
1766 SDValue Lo, Hi;
1768 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG);
1769 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG);
1770 std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG);
1772 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());
1774 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1775 Align BaseAlign = Store->getAlign();
1776 unsigned Size = LoMemVT.getStoreSize();
1777 Align HiAlign = commonAlignment(BaseAlign, Size);
1779 SDValue LoStore =
1780 DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
1781 Store->getMemOperand()->getFlags());
1782 SDValue HiStore =
1783 DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
1784 HiMemVT, HiAlign, Store->getMemOperand()->getFlags());
1786 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1789 // This is a shortcut for integer division because we have fast i32<->f32
1790 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1791 // float is enough to accurately represent up to a 24-bit signed integer.
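//
// Sketch of the idea (for operands already known to fit in 24 bits):
//   fq = trunc((float)a * rcp((float)b));        // quotient estimate
//   fr = fabs(mad(-fq, (float)b, (float)a));     // |a - fq*b|, float remainder
//   q  = (int)fq + (fr >= fabs((float)b) ? jq : 0);  // jq is +/-1 with the
//                                                    // sign of the quotient
//   r  = a - q * b;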
1792 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
1793 bool Sign) const {
1794 SDLoc DL(Op);
1795 EVT VT = Op.getValueType();
1796 SDValue LHS = Op.getOperand(0);
1797 SDValue RHS = Op.getOperand(1);
1798 MVT IntVT = MVT::i32;
1799 MVT FltVT = MVT::f32;
1801 unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
1802 if (LHSSignBits < 9)
1803 return SDValue();
1805 unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
1806 if (RHSSignBits < 9)
1807 return SDValue();
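// Requiring at least 9 sign bits on each operand keeps the values within
// roughly 24 significant bits, small enough to be represented exactly in an
// f32 mantissa for the float-based fast path below.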
1809 unsigned BitSize = VT.getSizeInBits();
1810 unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
1811 unsigned DivBits = BitSize - SignBits;
1812 if (Sign)
1813 ++DivBits;
1815 ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1816 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1818 SDValue jq = DAG.getConstant(1, DL, IntVT);
1820 if (Sign) {
1821 // char|short jq = ia ^ ib;
1822 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1824 // jq = jq >> (bitsize - 2)
1825 jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1826 DAG.getConstant(BitSize - 2, DL, VT));
1828 // jq = jq | 0x1
1829 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1832 // int ia = (int)LHS;
1833 SDValue ia = LHS;
1835 // int ib = (int)RHS;
1836 SDValue ib = RHS;
1838 // float fa = (float)ia;
1839 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1841 // float fb = (float)ib;
1842 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1844 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1845 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1847 // fq = trunc(fq);
1848 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1850 // float fqneg = -fq;
1851 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1853 MachineFunction &MF = DAG.getMachineFunction();
1855 bool UseFmadFtz = false;
1856 if (Subtarget->isGCN()) {
1857 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1858 UseFmadFtz = MFI->getMode().allFP32Denormals();
1861 // float fr = mad(fqneg, fb, fa);
1862 unsigned OpCode = !Subtarget->hasMadMacF32Insts() ? (unsigned)ISD::FMA
1863 : UseFmadFtz ? (unsigned)AMDGPUISD::FMAD_FTZ
1864 : (unsigned)ISD::FMAD;
1865 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);
1867 // int iq = (int)fq;
1868 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1870 // fr = fabs(fr);
1871 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1873 // fb = fabs(fb);
1874 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1876 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1878 // int cv = fr >= fb;
1879 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1881 // jq = (cv ? jq : 0);
1882 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1884 // dst = iq + jq;
1885 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1887 // Rem needs compensation; it's easier to recompute it.
1888 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1889 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1891 // Truncate to the number of bits this divide really needs.
1892 if (Sign) {
1893 SDValue InRegSize
1894 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
1895 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
1896 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
1897 } else {
1898 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
1899 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
1900 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
1903 return DAG.getMergeValues({ Div, Rem }, DL);
1906 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1907 SelectionDAG &DAG,
1908 SmallVectorImpl<SDValue> &Results) const {
1909 SDLoc DL(Op);
1910 EVT VT = Op.getValueType();
1912 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");
1914 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1916 SDValue One = DAG.getConstant(1, DL, HalfVT);
1917 SDValue Zero = DAG.getConstant(0, DL, HalfVT);
1919 // HiLo split
1920 SDValue LHS_Lo, LHS_Hi;
1921 SDValue LHS = Op.getOperand(0);
1922 std::tie(LHS_Lo, LHS_Hi) = DAG.SplitScalar(LHS, DL, HalfVT, HalfVT);
1924 SDValue RHS_Lo, RHS_Hi;
1925 SDValue RHS = Op.getOperand(1);
1926 std::tie(RHS_Lo, RHS_Hi) = DAG.SplitScalar(RHS, DL, HalfVT, HalfVT);
1928 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1929 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1931 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1932 LHS_Lo, RHS_Lo);
1934 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
1935 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});
1937 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1938 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1939 return;
1942 if (isTypeLegal(MVT::i64)) {
1943 // The algorithm here is based on ideas from "Software Integer Division",
1944 // Tom Rodeheffer, August 2008.
1946 MachineFunction &MF = DAG.getMachineFunction();
1947 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1949 // Compute denominator reciprocal.
1950 unsigned FMAD = !Subtarget->hasMadMacF32Insts() ?
1951 (unsigned)ISD::FMA :
1952 !MFI->getMode().allFP32Denormals() ?
1953 (unsigned)ISD::FMAD :
1954 (unsigned)AMDGPUISD::FMAD_FTZ;
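// Roughly: combine the two halves of RHS into a single f32 value
// (Hi * 2^32 + Lo; 0x4f800000 is 2.0^32 as an f32), take its hardware
// reciprocal, and scale by ~2^64 (0x5f7ffffc, slightly below 2^64) to obtain a
// 64-bit fixed-point reciprocal. The 0x2f800000 (2^-32) and 0xcf800000 (-2^32)
// constants split that estimate back into the Rcp_Hi / Rcp_Lo halves.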
1956 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
1957 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
1958 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
1959 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
1960 Cvt_Lo);
1961 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
1962 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
1963 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
1964 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
1965 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
1966 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
1967 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
1968 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
1969 Mul1);
1970 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
1971 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
1972 SDValue Rcp64 = DAG.getBitcast(VT,
1973 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));
1975 SDValue Zero64 = DAG.getConstant(0, DL, VT);
1976 SDValue One64 = DAG.getConstant(1, DL, VT);
1977 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
1978 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);
1980 // First round of UNR (Unsigned integer Newton-Raphson).
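// Each round refines the fixed-point reciprocal x ~= 2^64 / RHS using
// x' = x + mulhi64(x, -RHS * x), computed here on 32-bit halves with explicit
// carry propagation.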
1981 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
1982 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
1983 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
1984 SDValue Mulhi1_Lo, Mulhi1_Hi;
1985 std::tie(Mulhi1_Lo, Mulhi1_Hi) =
1986 DAG.SplitScalar(Mulhi1, DL, HalfVT, HalfVT);
1987 SDValue Add1_Lo = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Rcp_Lo,
1988 Mulhi1_Lo, Zero1);
1989 SDValue Add1_Hi = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Rcp_Hi,
1990 Mulhi1_Hi, Add1_Lo.getValue(1));
1991 SDValue Add1 = DAG.getBitcast(VT,
1992 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));
1994 // Second round of UNR.
1995 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
1996 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
1997 SDValue Mulhi2_Lo, Mulhi2_Hi;
1998 std::tie(Mulhi2_Lo, Mulhi2_Hi) =
1999 DAG.SplitScalar(Mulhi2, DL, HalfVT, HalfVT);
2000 SDValue Add2_Lo = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Add1_Lo,
2001 Mulhi2_Lo, Zero1);
2002 SDValue Add2_Hi = DAG.getNode(ISD::UADDO_CARRY, DL, HalfCarryVT, Add1_Hi,
2003 Mulhi2_Hi, Add2_Lo.getValue(1));
2004 SDValue Add2 = DAG.getBitcast(VT,
2005 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
2007 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);
2009 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);
2011 SDValue Mul3_Lo, Mul3_Hi;
2012 std::tie(Mul3_Lo, Mul3_Hi) = DAG.SplitScalar(Mul3, DL, HalfVT, HalfVT);
2013 SDValue Sub1_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, LHS_Lo,
2014 Mul3_Lo, Zero1);
2015 SDValue Sub1_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, LHS_Hi,
2016 Mul3_Hi, Sub1_Lo.getValue(1));
2017 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
2018 SDValue Sub1 = DAG.getBitcast(VT,
2019 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));
2021 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
2022 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
2023 ISD::SETUGE);
2024 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
2025 ISD::SETUGE);
2026 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);
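// C3 is all-ones when the remainder estimate Sub1 is still >= RHS, i.e. the
// quotient estimate Mulhi3 is too small. The estimate appears to be short by
// at most two, hence the two correction steps below.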
2028 // TODO: Here and below, portions of the code could be enclosed in if/endif.
2029 // Currently the control flow is unconditional and we have 4 selects after the
2030 // potential endif to substitute for PHIs.
2032 // if C3 != 0 ...
2033 SDValue Sub2_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub1_Lo,
2034 RHS_Lo, Zero1);
2035 SDValue Sub2_Mi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub1_Mi,
2036 RHS_Hi, Sub1_Lo.getValue(1));
2037 SDValue Sub2_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Mi,
2038 Zero, Sub2_Lo.getValue(1));
2039 SDValue Sub2 = DAG.getBitcast(VT,
2040 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));
2042 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);
2044 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
2045 ISD::SETUGE);
2046 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
2047 ISD::SETUGE);
2048 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);
2050 // if (C6 != 0)
2051 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);
2053 SDValue Sub3_Lo = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Lo,
2054 RHS_Lo, Zero1);
2055 SDValue Sub3_Mi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub2_Mi,
2056 RHS_Hi, Sub2_Lo.getValue(1));
2057 SDValue Sub3_Hi = DAG.getNode(ISD::USUBO_CARRY, DL, HalfCarryVT, Sub3_Mi,
2058 Zero, Sub3_Lo.getValue(1));
2059 SDValue Sub3 = DAG.getBitcast(VT,
2060 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));
2062 // endif C6
2063 // endif C3
2065 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
2066 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);
2068 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
2069 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);
2071 Results.push_back(Div);
2072 Results.push_back(Rem);
2074 return;
2077 // r600 expansion.
2078 // Get speculative values.
2079 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
2080 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
2082 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
2083 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
2084 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
2086 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
2087 SDValue DIV_Lo = Zero;
2089 const unsigned halfBitWidth = HalfVT.getSizeInBits();
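// Restoring long division over the low 32 bits: each iteration shifts the
// remainder left by one, brings in the next bit of LHS_Lo, and subtracts RHS
// (setting the corresponding quotient bit) whenever the remainder is large
// enough.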
2091 for (unsigned i = 0; i < halfBitWidth; ++i) {
2092 const unsigned bitPos = halfBitWidth - i - 1;
2093 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
2094 // Get value of high bit
2095 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
2096 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
2097 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
2099 // Shift
2100 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
2101 // Add LHS high bit
2102 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
2104 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
2105 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);
2107 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
2109 // Update REM
2110 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
2111 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
2114 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
2115 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
2116 Results.push_back(DIV);
2117 Results.push_back(REM);
2120 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
2121 SelectionDAG &DAG) const {
2122 SDLoc DL(Op);
2123 EVT VT = Op.getValueType();
2125 if (VT == MVT::i64) {
2126 SmallVector<SDValue, 2> Results;
2127 LowerUDIVREM64(Op, DAG, Results);
2128 return DAG.getMergeValues(Results, DL);
2131 if (VT == MVT::i32) {
2132 if (SDValue Res = LowerDIVREM24(Op, DAG, false))
2133 return Res;
2136 SDValue X = Op.getOperand(0);
2137 SDValue Y = Op.getOperand(1);
2139 // See AMDGPUCodeGenPrepare::expandDivRem32 for a description of the
2140 // algorithm used here.
2142 // Initial estimate of inv(y).
2143 SDValue Z = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Y);
2145 // One round of UNR.
2146 SDValue NegY = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Y);
2147 SDValue NegYZ = DAG.getNode(ISD::MUL, DL, VT, NegY, Z);
2148 Z = DAG.getNode(ISD::ADD, DL, VT, Z,
2149 DAG.getNode(ISD::MULHU, DL, VT, Z, NegYZ));
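// i.e. z' = z + mulhi(z, -y * z), one Newton-Raphson step on the 0.32
// fixed-point reciprocal estimate.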
2151 // Quotient/remainder estimate.
2152 SDValue Q = DAG.getNode(ISD::MULHU, DL, VT, X, Z);
2153 SDValue R =
2154 DAG.getNode(ISD::SUB, DL, VT, X, DAG.getNode(ISD::MUL, DL, VT, Q, Y));
2156 // First quotient/remainder refinement.
2157 EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2158 SDValue One = DAG.getConstant(1, DL, VT);
2159 SDValue Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2160 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2161 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2162 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2163 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2165 // Second quotient/remainder refinement.
2166 Cond = DAG.getSetCC(DL, CCVT, R, Y, ISD::SETUGE);
2167 Q = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2168 DAG.getNode(ISD::ADD, DL, VT, Q, One), Q);
2169 R = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2170 DAG.getNode(ISD::SUB, DL, VT, R, Y), R);
2172 return DAG.getMergeValues({Q, R}, DL);
2175 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
2176 SelectionDAG &DAG) const {
2177 SDLoc DL(Op);
2178 EVT VT = Op.getValueType();
2180 SDValue LHS = Op.getOperand(0);
2181 SDValue RHS = Op.getOperand(1);
2183 SDValue Zero = DAG.getConstant(0, DL, VT);
2184 SDValue NegOne = DAG.getConstant(-1, DL, VT);
2186 if (VT == MVT::i32) {
2187 if (SDValue Res = LowerDIVREM24(Op, DAG, true))
2188 return Res;
2191 if (VT == MVT::i64 &&
2192 DAG.ComputeNumSignBits(LHS) > 32 &&
2193 DAG.ComputeNumSignBits(RHS) > 32) {
2194 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
2196 // HiLo split
2197 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
2198 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
2199 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
2200 LHS_Lo, RHS_Lo);
2201 SDValue Res[2] = {
2202 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
2203 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
2205 return DAG.getMergeValues(Res, DL);
2208 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
2209 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
2210 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
2211 SDValue RSign = LHSign; // Remainder sign is the same as LHS
2213 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
2214 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
2216 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
2217 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
2219 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
2220 SDValue Rem = Div.getValue(1);
2222 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
2223 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
2225 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
2226 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
2228 SDValue Res[2] = {
2229 Div,
2232 return DAG.getMergeValues(Res, DL);
2235 // (frem x, y) -> (fma (fneg (ftrunc (fdiv x, y))), y, x)
2236 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
2237 SDLoc SL(Op);
2238 EVT VT = Op.getValueType();
2239 auto Flags = Op->getFlags();
2240 SDValue X = Op.getOperand(0);
2241 SDValue Y = Op.getOperand(1);
2243 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y, Flags);
2244 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div, Flags);
2245 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Trunc, Flags);
2246 // TODO: For f32 use FMAD instead if !hasFastFMA32?
2247 return DAG.getNode(ISD::FMA, SL, VT, Neg, Y, X, Flags);
2250 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
2251 SDLoc SL(Op);
2252 SDValue Src = Op.getOperand(0);
2254 // result = trunc(src)
2255 // if (src > 0.0 && src != result)
2256 // result += 1.0
2258 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2260 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2261 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
2263 EVT SetCCVT =
2264 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2266 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
2267 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2268 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2270 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
2271 // TODO: Should this propagate fast-math-flags?
2272 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2275 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
2276 SelectionDAG &DAG) {
2277 const unsigned FractBits = 52;
2278 const unsigned ExpBits = 11;
2280 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
2282 DAG.getConstant(FractBits - 32, SL, MVT::i32),
2283 DAG.getConstant(ExpBits, SL, MVT::i32));
2284 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
2285 DAG.getConstant(1023, SL, MVT::i32));
2287 return Exp;
2290 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
2291 SDLoc SL(Op);
2292 SDValue Src = Op.getOperand(0);
2294 assert(Op.getValueType() == MVT::f64);
2296 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2298 // Extract the upper half, since this is where we will find the sign and
2299 // exponent.
2300 SDValue Hi = getHiHalf64(Src, DAG);
2302 SDValue Exp = extractF64Exponent(Hi, SL, DAG);
2304 const unsigned FractBits = 52;
2306 // Extract the sign bit.
2307 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
2308 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
2310 // Extend back to 64-bits.
2311 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
2312 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
2314 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
2315 const SDValue FractMask
2316 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
2318 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
2319 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
2320 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
2322 EVT SetCCVT =
2323 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
2325 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
2327 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
2328 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
2330 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
2331 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
2333 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
2336 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
2337 SDLoc SL(Op);
2338 SDValue Src = Op.getOperand(0);
2340 assert(Op.getValueType() == MVT::f64);
2342 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52");
2343 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
2344 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
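// Adding and then subtracting 2^52 with the sign of the input forces the value
// to be rounded to an integer by the f64 rounding mode. Inputs with a
// magnitude above ~2^52 (or NaN) are already integral and are passed through
// by the final select.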
2346 // TODO: Should this propagate fast-math-flags?
2348 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
2349 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
2351 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
2353 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51");
2354 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
2356 EVT SetCCVT =
2357 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2358 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
2360 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
2363 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
2364 // FNEARBYINT and FRINT are the same, except in their handling of FP
2365 // exceptions. Those aren't really meaningful for us, and OpenCL only has
2366 // rint, so just treat them as equivalent.
2367 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
2370 SDValue AMDGPUTargetLowering::LowerFROUNDEVEN(SDValue Op,
2371 SelectionDAG &DAG) const {
2372 auto VT = Op.getValueType();
2373 auto Arg = Op.getOperand(0u);
2374 return DAG.getNode(ISD::FRINT, SDLoc(Op), VT, Arg);
2377 // XXX - May require not supporting f32 denormals?
2379 // Don't handle v2f16. The extra instructions to scalarize and repack around the
2380 // compare and vselect end up producing worse code than scalarizing the whole
2381 // operation.
2382 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
2383 SDLoc SL(Op);
2384 SDValue X = Op.getOperand(0);
2385 EVT VT = Op.getValueType();
2387 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X);
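// round(x) is computed as
//   trunc(x) + (|x - trunc(x)| >= 0.5 ? copysign(1.0, x) : 0.0),
// i.e. round half away from zero.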
2389 // TODO: Should this propagate fast-math-flags?
2391 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T);
2393 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff);
2395 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2396 const SDValue One = DAG.getConstantFP(1.0, SL, VT);
2397 const SDValue Half = DAG.getConstantFP(0.5, SL, VT);
2399 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X);
2401 EVT SetCCVT =
2402 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
2404 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
2406 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero);
2408 return DAG.getNode(ISD::FADD, SL, VT, T, Sel);
2411 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
2412 SDLoc SL(Op);
2413 SDValue Src = Op.getOperand(0);
2415 // result = trunc(src);
2416 // if (src < 0.0 && src != result)
2417 // result += -1.0.
2419 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2421 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
2422 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
2424 EVT SetCCVT =
2425 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
2427 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
2428 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
2429 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
2431 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
2432 // TODO: Should this propagate fast-math-flags?
2433 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
2436 /// Return true if it's known that \p Src can never be an f32 denormal value.
2437 static bool valueIsKnownNeverF32Denorm(SDValue Src) {
2438 switch (Src.getOpcode()) {
2439 case ISD::FP_EXTEND:
2440 return Src.getOperand(0).getValueType() == MVT::f16;
2441 case ISD::FP16_TO_FP:
2442 return true;
2443 default:
2444 return false;
2447 llvm_unreachable("covered opcode switch");
2450 SDValue AMDGPUTargetLowering::LowerFLOG2(SDValue Op, SelectionDAG &DAG) const {
2451 // v_log_f32 is good enough for OpenCL, except it doesn't handle denormals.
2452 // If we have to handle denormals, scale up the input and adjust the result.
2454 // scaled = x * (is_denormal ? 0x1.0p+32 : 1.0)
2455 // log2 = amdgpu_log2 - (is_denormal ? 32.0 : 0.0)
2457 SDLoc SL(Op);
2458 EVT VT = Op.getValueType();
2459 SDValue Src = Op.getOperand(0);
2460 SDNodeFlags Flags = Op->getFlags();
2462 if (VT == MVT::f16) {
2463 // Nothing in half is a denormal when promoted to f32.
2464 assert(!Subtarget->has16BitInsts());
2465 SDValue Ext = DAG.getNode(ISD::FP_EXTEND, SL, MVT::f32, Src, Flags);
2466 SDValue Log = DAG.getNode(AMDGPUISD::LOG, SL, MVT::f32, Ext, Flags);
2467 return DAG.getNode(ISD::FP_ROUND, SL, VT, Log,
2468 DAG.getTargetConstant(0, SL, MVT::i32), Flags);
2471 bool NeedDenormHandling =
2472 !Flags.hasApproximateFuncs() && !DAG.getTarget().Options.UnsafeFPMath &&
2473 !DAG.getTarget().Options.ApproxFuncFPMath &&
2474 !valueIsKnownNeverF32Denorm(Src) &&
2475 DAG.getDenormalMode(VT).Input != DenormalMode::PreserveSign;
2477 if (!NeedDenormHandling)
2478 return DAG.getNode(AMDGPUISD::LOG, SL, VT, Src, Flags);
2480 const fltSemantics &Semantics = APFloat::IEEEsingle();
2481 SDValue SmallestNormal =
2482 DAG.getConstantFP(APFloat::getSmallestNormalized(Semantics), SL, VT);
2484 // Want to scale denormals up, but negatives and 0 work just as well on the
2485 // scaled path.
2486 SDValue IsLtSmallestNormal = DAG.getSetCC(
2487 SL, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT), Src,
2488 SmallestNormal, ISD::SETOLT);
2490 SDValue Scale32 = DAG.getConstantFP(0x1.0p+32, SL, VT);
2491 SDValue One = DAG.getConstantFP(1.0, SL, VT);
2492 SDValue ScaleFactor =
2493 DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, Scale32, One, Flags);
2495 SDValue ScaledInput = DAG.getNode(ISD::FMUL, SL, VT, Src, ScaleFactor, Flags);
2497 SDValue Log2 = DAG.getNode(AMDGPUISD::LOG, SL, VT, ScaledInput, Flags);
2499 SDValue ThirtyTwo = DAG.getConstantFP(32.0, SL, VT);
2500 SDValue Zero = DAG.getConstantFP(0.0, SL, VT);
2501 SDValue ResultOffset =
2502 DAG.getNode(ISD::SELECT, SL, VT, IsLtSmallestNormal, ThirtyTwo, Zero);
2503 return DAG.getNode(ISD::FSUB, SL, VT, Log2, ResultOffset, Flags);
2506 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
2507 double Log2BaseInverted) const {
2508 EVT VT = Op.getValueType();
2510 SDLoc SL(Op);
2511 SDValue Operand = Op.getOperand(0);
2512 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
2513 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);
2515 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
2518 // exp2(M_LOG2E_F * f);
2519 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
2520 EVT VT = Op.getValueType();
2521 SDLoc SL(Op);
2522 SDValue Src = Op.getOperand(0);
2524 const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT);
2525 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
2526 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
2529 static bool isCtlzOpc(unsigned Opc) {
2530 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2533 static bool isCttzOpc(unsigned Opc) {
2534 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
2537 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
2538 SDLoc SL(Op);
2539 SDValue Src = Op.getOperand(0);
2541 assert(isCtlzOpc(Op.getOpcode()) || isCttzOpc(Op.getOpcode()));
2542 bool Ctlz = isCtlzOpc(Op.getOpcode());
2543 unsigned NewOpc = Ctlz ? AMDGPUISD::FFBH_U32 : AMDGPUISD::FFBL_B32;
2545 bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF ||
2546 Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF;
2548 if (Src.getValueType() == MVT::i32) {
2549 // (ctlz hi:lo) -> (umin (ffbh src), 32)
2550 // (cttz hi:lo) -> (umin (ffbl src), 32)
2551 // (ctlz_zero_undef src) -> (ffbh src)
2552 // (cttz_zero_undef src) -> (ffbl src)
2553 SDValue NewOpr = DAG.getNode(NewOpc, SL, MVT::i32, Src);
2554 if (!ZeroUndef) {
2555 const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
2556 NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const32);
2558 return NewOpr;
2561 SDValue Lo, Hi;
2562 std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2564 SDValue OprLo = DAG.getNode(NewOpc, SL, MVT::i32, Lo);
2565 SDValue OprHi = DAG.getNode(NewOpc, SL, MVT::i32, Hi);
2567 // (ctlz hi:lo) -> (umin3 (ffbh hi), (uaddsat (ffbh lo), 32), 64)
2568 // (cttz hi:lo) -> (umin3 (uaddsat (ffbl hi), 32), (ffbl lo), 64)
2569 // (ctlz_zero_undef hi:lo) -> (umin (ffbh hi), (add (ffbh lo), 32))
2570 // (cttz_zero_undef hi:lo) -> (umin (add (ffbl hi), 32), (ffbl lo))
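//
// ffbh/ffbl produce -1 (all ones) for a zero input, so in the non-zero-undef
// case the saturating add keeps that value large and the final umin with 64
// still yields 64 when the whole 64-bit source is zero.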
2572 unsigned AddOpc = ZeroUndef ? ISD::ADD : ISD::UADDSAT;
2573 const SDValue Const32 = DAG.getConstant(32, SL, MVT::i32);
2574 if (Ctlz)
2575 OprLo = DAG.getNode(AddOpc, SL, MVT::i32, OprLo, Const32);
2576 else
2577 OprHi = DAG.getNode(AddOpc, SL, MVT::i32, OprHi, Const32);
2579 SDValue NewOpr;
2580 NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, OprLo, OprHi);
2581 if (!ZeroUndef) {
2582 const SDValue Const64 = DAG.getConstant(64, SL, MVT::i32);
2583 NewOpr = DAG.getNode(ISD::UMIN, SL, MVT::i32, NewOpr, Const64);
2586 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
2589 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
2590 bool Signed) const {
2591 // The regular method of converting a 64-bit integer to float roughly consists of
2592 // 2 steps: normalization and rounding. In fact, after normalization, the
2593 // conversion from a 64-bit integer to a float is essentially the same as the
2594 // one from a 32-bit integer. The only difference is that it has more
2595 // trailing bits to be rounded. To leverage the native 32-bit conversion, a
2596 // 64-bit integer could be preprocessed and fit into a 32-bit integer then
2597 // converted into the correct float number. The basic steps for the unsigned
2598 // conversion are illustrated in the following pseudo code:
2600 // f32 uitofp(i64 u) {
2601 // i32 hi, lo = split(u);
2602 // // Only count the leading zeros in hi as we have native support of the
2603 // // conversion from i32 to f32. If hi is all 0s, the conversion is
2604 // // reduced to a 32-bit one automatically.
2605 // i32 shamt = clz(hi); // Return 32 if hi is all 0s.
2606 // u <<= shamt;
2607 // hi, lo = split(u);
2608 // hi |= (lo != 0) ? 1 : 0; // Adjust rounding bit in hi based on lo.
2609 // // convert it as a 32-bit integer and scale the result back.
2610 // return uitofp(hi) * 2^(32 - shamt);
2611 // }
2613 // The signed one follows the same principle but uses 'ffbh_i32' to count its
2614 // sign bits instead. If 'ffbh_i32' is not available, its absolute value is
2615 // converted instead, followed by negation based on its sign bit.
2617 SDLoc SL(Op);
2618 SDValue Src = Op.getOperand(0);
2620 SDValue Lo, Hi;
2621 std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2622 SDValue Sign;
2623 SDValue ShAmt;
2624 if (Signed && Subtarget->isGCN()) {
2625 // We also need to consider the sign bit in Lo if Hi has just sign bits,
2626 // i.e. Hi is 0 or -1. However, that only needs to take the MSB into
2627 // account. That is, the maximal shift is
2628 // - 32 if Lo and Hi have opposite signs;
2629 // - 33 if Lo and Hi have the same sign.
2631 // Or, MaxShAmt = 33 + OppositeSign, where
2633 // OppositeSign is defined as ((Lo ^ Hi) >> 31), which is
2634 // - -1 if Lo and Hi have opposite signs; and
2635 // - 0 otherwise.
2637 // All in all, ShAmt is calculated as
2639 // umin(sffbh(Hi), 33 + (Lo^Hi)>>31) - 1.
2641 // or
2643 // umin(sffbh(Hi) - 1, 32 + (Lo^Hi)>>31).
2645 // to reduce the critical path.
2646 SDValue OppositeSign = DAG.getNode(
2647 ISD::SRA, SL, MVT::i32, DAG.getNode(ISD::XOR, SL, MVT::i32, Lo, Hi),
2648 DAG.getConstant(31, SL, MVT::i32));
2649 SDValue MaxShAmt =
2650 DAG.getNode(ISD::ADD, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
2651 OppositeSign);
2652 // Count the leading sign bits.
2653 ShAmt = DAG.getNode(AMDGPUISD::FFBH_I32, SL, MVT::i32, Hi);
2654 // Unlike the unsigned conversion, the shift amount should be one bit less to
2655 // preserve the sign bit.
2656 ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, ShAmt,
2657 DAG.getConstant(1, SL, MVT::i32));
2658 ShAmt = DAG.getNode(ISD::UMIN, SL, MVT::i32, ShAmt, MaxShAmt);
2659 } else {
2660 if (Signed) {
2661 // Without 'ffbh_i32', only leading zeros could be counted. Take the
2662 // absolute value first.
2663 Sign = DAG.getNode(ISD::SRA, SL, MVT::i64, Src,
2664 DAG.getConstant(63, SL, MVT::i64));
2665 SDValue Abs =
2666 DAG.getNode(ISD::XOR, SL, MVT::i64,
2667 DAG.getNode(ISD::ADD, SL, MVT::i64, Src, Sign), Sign);
2668 std::tie(Lo, Hi) = split64BitValue(Abs, DAG);
2670 // Count the leading zeros.
2671 ShAmt = DAG.getNode(ISD::CTLZ, SL, MVT::i32, Hi);
2672 // The shift amount for signed integers is [0, 32].
2674 // Normalize the given 64-bit integer.
2675 SDValue Norm = DAG.getNode(ISD::SHL, SL, MVT::i64, Src, ShAmt);
2676 // Split it again.
2677 std::tie(Lo, Hi) = split64BitValue(Norm, DAG);
2678 // Calculate the adjust bit for rounding.
2679 // (lo != 0) ? 1 : 0 => (lo >= 1) ? 1 : 0 => umin(1, lo)
2680 SDValue Adjust = DAG.getNode(ISD::UMIN, SL, MVT::i32,
2681 DAG.getConstant(1, SL, MVT::i32), Lo);
2682 // Get the 32-bit normalized integer.
2683 Norm = DAG.getNode(ISD::OR, SL, MVT::i32, Hi, Adjust);
2684 // Convert the normalized 32-bit integer into f32.
2685 unsigned Opc =
2686 (Signed && Subtarget->isGCN()) ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
2687 SDValue FVal = DAG.getNode(Opc, SL, MVT::f32, Norm);
2689 // Finally, we need to scale the converted floating-point number back, as the
2690 // original 64-bit integer was converted as a 32-bit one.
2691 ShAmt = DAG.getNode(ISD::SUB, SL, MVT::i32, DAG.getConstant(32, SL, MVT::i32),
2692 ShAmt);
2693 // On GCN, use LDEXP directly.
2694 if (Subtarget->isGCN())
2695 return DAG.getNode(ISD::FLDEXP, SL, MVT::f32, FVal, ShAmt);
2697 // Otherwise, shift 'ShAmt' into the exponent position and add it into the
2698 // exponent field directly to emulate the multiplication by 2^ShAmt. The 8-bit
2699 // exponent field is wide enough to avoid overflowing into the sign bit.
2700 SDValue Exp = DAG.getNode(ISD::SHL, SL, MVT::i32, ShAmt,
2701 DAG.getConstant(23, SL, MVT::i32));
2702 SDValue IVal =
2703 DAG.getNode(ISD::ADD, SL, MVT::i32,
2704 DAG.getNode(ISD::BITCAST, SL, MVT::i32, FVal), Exp);
2705 if (Signed) {
2706 // Set the sign bit.
2707 Sign = DAG.getNode(ISD::SHL, SL, MVT::i32,
2708 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Sign),
2709 DAG.getConstant(31, SL, MVT::i32));
2710 IVal = DAG.getNode(ISD::OR, SL, MVT::i32, IVal, Sign);
2712 return DAG.getNode(ISD::BITCAST, SL, MVT::f32, IVal);
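// Convert a 64-bit integer to f64 as (f64)hi * 2^32 + (f64)lo: the high half
// is converted with the requested signedness and scaled with ldexp, and the
// low half is always converted as unsigned.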
2715 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
2716 bool Signed) const {
2717 SDLoc SL(Op);
2718 SDValue Src = Op.getOperand(0);
2720 SDValue Lo, Hi;
2721 std::tie(Lo, Hi) = split64BitValue(Src, DAG);
2723 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
2724 SL, MVT::f64, Hi);
2726 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
2728 SDValue LdExp = DAG.getNode(ISD::FLDEXP, SL, MVT::f64, CvtHi,
2729 DAG.getConstant(32, SL, MVT::i32));
2730 // TODO: Should this propagate fast-math-flags?
2731 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
2734 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
2735 SelectionDAG &DAG) const {
2736 // TODO: Factor out code common with LowerSINT_TO_FP.
2737 EVT DestVT = Op.getValueType();
2738 SDValue Src = Op.getOperand(0);
2739 EVT SrcVT = Src.getValueType();
2741 if (SrcVT == MVT::i16) {
2742 if (DestVT == MVT::f16)
2743 return Op;
2744 SDLoc DL(Op);
2746 // Promote src to i32
2747 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src);
2748 return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext);
2751 assert(SrcVT == MVT::i64 && "operation should be legal");
2753 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2754 SDLoc DL(Op);
2756 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2757 SDValue FPRoundFlag =
2758 DAG.getIntPtrConstant(0, SDLoc(Op), /*isTarget=*/true);
2759 SDValue FPRound =
2760 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2762 return FPRound;
2765 if (DestVT == MVT::f32)
2766 return LowerINT_TO_FP32(Op, DAG, false);
2768 assert(DestVT == MVT::f64);
2769 return LowerINT_TO_FP64(Op, DAG, false);
2772 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2773 SelectionDAG &DAG) const {
2774 EVT DestVT = Op.getValueType();
2776 SDValue Src = Op.getOperand(0);
2777 EVT SrcVT = Src.getValueType();
2779 if (SrcVT == MVT::i16) {
2780 if (DestVT == MVT::f16)
2781 return Op;
2783 SDLoc DL(Op);
2784 // Promote src to i32
2785 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src);
2786 return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext);
2789 assert(SrcVT == MVT::i64 && "operation should be legal");
2791 // TODO: Factor out code common with LowerUINT_TO_FP.
2793 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) {
2794 SDLoc DL(Op);
2795 SDValue Src = Op.getOperand(0);
2797 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src);
2798 SDValue FPRoundFlag =
2799 DAG.getIntPtrConstant(0, SDLoc(Op), /*isTarget=*/true);
2800 SDValue FPRound =
2801 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag);
2803 return FPRound;
2806 if (DestVT == MVT::f32)
2807 return LowerINT_TO_FP32(Op, DAG, true);
2809 assert(DestVT == MVT::f64);
2810 return LowerINT_TO_FP64(Op, DAG, true);
2813 SDValue AMDGPUTargetLowering::LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG,
2814 bool Signed) const {
2815 SDLoc SL(Op);
2817 SDValue Src = Op.getOperand(0);
2818 EVT SrcVT = Src.getValueType();
2820 assert(SrcVT == MVT::f32 || SrcVT == MVT::f64);
2822 // The basic idea of converting a floating point number into a pair of 32-bit
2823 // integers is illustrated as follows:
2825 // tf := trunc(val);
2826 // hif := floor(tf * 2^-32);
2827 // lof := tf - hif * 2^32; // lof is always positive due to floor.
2828 // hi := fptoi(hif);
2829 // lo := fptoi(lof);
2831 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, SrcVT, Src);
2832 SDValue Sign;
2833 if (Signed && SrcVT == MVT::f32) {
2834 // However, a 32-bit floating point number has only a 23-bit mantissa, which
2835 // is not enough to hold all the significant bits of `lof` if val is
2836 // negative. To avoid the loss of precision, we need to take the absolute
2837 // value after truncating and flip the result back based on the original
2838 // signedness.
2839 Sign = DAG.getNode(ISD::SRA, SL, MVT::i32,
2840 DAG.getNode(ISD::BITCAST, SL, MVT::i32, Trunc),
2841 DAG.getConstant(31, SL, MVT::i32));
2842 Trunc = DAG.getNode(ISD::FABS, SL, SrcVT, Trunc);
2845 SDValue K0, K1;
2846 if (SrcVT == MVT::f64) {
2847 K0 = DAG.getConstantFP(
2848 llvm::bit_cast<double>(UINT64_C(/*2^-32*/ 0x3df0000000000000)), SL,
2849 SrcVT);
2850 K1 = DAG.getConstantFP(
2851 llvm::bit_cast<double>(UINT64_C(/*-2^32*/ 0xc1f0000000000000)), SL,
2852 SrcVT);
2853 } else {
2854 K0 = DAG.getConstantFP(
2855 llvm::bit_cast<float>(UINT32_C(/*2^-32*/ 0x2f800000)), SL, SrcVT);
2856 K1 = DAG.getConstantFP(
2857 llvm::bit_cast<float>(UINT32_C(/*-2^32*/ 0xcf800000)), SL, SrcVT);
2859 // TODO: Should this propagate fast-math-flags?
2860 SDValue Mul = DAG.getNode(ISD::FMUL, SL, SrcVT, Trunc, K0);
2862 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, SrcVT, Mul);
2864 SDValue Fma = DAG.getNode(ISD::FMA, SL, SrcVT, FloorMul, K1, Trunc);
2866 SDValue Hi = DAG.getNode((Signed && SrcVT == MVT::f64) ? ISD::FP_TO_SINT
2867 : ISD::FP_TO_UINT,
2868 SL, MVT::i32, FloorMul);
2869 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2871 SDValue Result = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
2872 DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}));
2874 if (Signed && SrcVT == MVT::f32) {
2875 assert(Sign);
2876 // Flip the result based on the signedness, which is either all 0s or 1s.
2877 Sign = DAG.getNode(ISD::BITCAST, SL, MVT::i64,
2878 DAG.getBuildVector(MVT::v2i32, SL, {Sign, Sign}));
2879 // r := xor(r, sign) - sign;
2880 Result =
2881 DAG.getNode(ISD::SUB, SL, MVT::i64,
2882 DAG.getNode(ISD::XOR, SL, MVT::i64, Result, Sign), Sign);
2885 return Result;
2888 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const {
2889 SDLoc DL(Op);
2890 SDValue N0 = Op.getOperand(0);
2892 // Convert to target node to get known bits
2893 if (N0.getValueType() == MVT::f32)
2894 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0);
2896 if (getTargetMachine().Options.UnsafeFPMath) {
2897 // There is a generic expand for FP_TO_FP16 with unsafe fast math.
2898 return SDValue();
2901 assert(N0.getSimpleValueType() == MVT::f64);
2903 // f64 -> f16 conversion using round-to-nearest-even rounding mode.
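// Roughly: extract the f64 exponent and the top mantissa bits, rebias the
// exponent for f16, fold the discarded low mantissa bits into a sticky bit,
// handle the denormal and overflow/NaN cases with selects, round to nearest
// even, and finally reattach the sign bit.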
2904 const unsigned ExpMask = 0x7ff;
2905 const unsigned ExpBiasf64 = 1023;
2906 const unsigned ExpBiasf16 = 15;
2907 SDValue Zero = DAG.getConstant(0, DL, MVT::i32);
2908 SDValue One = DAG.getConstant(1, DL, MVT::i32);
2909 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0);
2910 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U,
2911 DAG.getConstant(32, DL, MVT::i64));
2912 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32);
2913 U = DAG.getZExtOrTrunc(U, DL, MVT::i32);
2914 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2915 DAG.getConstant(20, DL, MVT::i64));
2916 E = DAG.getNode(ISD::AND, DL, MVT::i32, E,
2917 DAG.getConstant(ExpMask, DL, MVT::i32));
2918 // Subtract the fp64 exponent bias (1023) to get the real exponent and
2919 // add the f16 bias (15) to get the biased exponent for the f16 format.
2920 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E,
2921 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32));
2923 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2924 DAG.getConstant(8, DL, MVT::i32));
2925 M = DAG.getNode(ISD::AND, DL, MVT::i32, M,
2926 DAG.getConstant(0xffe, DL, MVT::i32));
2928 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH,
2929 DAG.getConstant(0x1ff, DL, MVT::i32));
2930 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U);
2932 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ);
2933 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set);
2935 // (M != 0 ? 0x0200 : 0) | 0x7c00;
2936 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32,
2937 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32),
2938 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32));
2940 // N = M | (E << 12);
2941 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2942 DAG.getNode(ISD::SHL, DL, MVT::i32, E,
2943 DAG.getConstant(12, DL, MVT::i32)));
2945 // B = clamp(1-E, 0, 13);
2946 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32,
2947 One, E);
2948 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero);
2949 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B,
2950 DAG.getConstant(13, DL, MVT::i32));
2952 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M,
2953 DAG.getConstant(0x1000, DL, MVT::i32));
2955 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B);
2956 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B);
2957 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE);
2958 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1);
2960 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT);
2961 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V,
2962 DAG.getConstant(0x7, DL, MVT::i32));
2963 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V,
2964 DAG.getConstant(2, DL, MVT::i32));
2965 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32),
2966 One, Zero, ISD::SETEQ);
2967 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32),
2968 One, Zero, ISD::SETGT);
2969 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1);
2970 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1);
2972 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32),
2973 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT);
2974 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32),
2975 I, V, ISD::SETEQ);
2977 // Extract the sign bit.
2978 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH,
2979 DAG.getConstant(16, DL, MVT::i32));
2980 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign,
2981 DAG.getConstant(0x8000, DL, MVT::i32));
2983 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V);
2984 return DAG.getZExtOrTrunc(V, DL, Op.getValueType());
2987 SDValue AMDGPUTargetLowering::LowerFP_TO_INT(SDValue Op,
2988 SelectionDAG &DAG) const {
2989 SDValue Src = Op.getOperand(0);
2990 unsigned OpOpcode = Op.getOpcode();
2991 EVT SrcVT = Src.getValueType();
2992 EVT DestVT = Op.getValueType();
2994 // Will be selected natively
2995 if (SrcVT == MVT::f16 && DestVT == MVT::i16)
2996 return Op;
2998 // Promote i16 to i32
2999 if (DestVT == MVT::i16 && (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
3000 SDLoc DL(Op);
3002 SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
3003 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FpToInt32);
3006 if (SrcVT == MVT::f16 ||
3007 (SrcVT == MVT::f32 && Src.getOpcode() == ISD::FP16_TO_FP)) {
3008 SDLoc DL(Op);
3010 SDValue FpToInt32 = DAG.getNode(OpOpcode, DL, MVT::i32, Src);
3011 unsigned Ext =
3012 OpOpcode == ISD::FP_TO_SINT ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
3013 return DAG.getNode(Ext, DL, MVT::i64, FpToInt32);
3016 if (DestVT == MVT::i64 && (SrcVT == MVT::f32 || SrcVT == MVT::f64))
3017 return LowerFP_TO_INT64(Op, DAG, OpOpcode == ISD::FP_TO_SINT);
3019 return SDValue();
3022 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
3023 SelectionDAG &DAG) const {
3024 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
3025 MVT VT = Op.getSimpleValueType();
3026 MVT ScalarVT = VT.getScalarType();
3028 assert(VT.isVector());
3030 SDValue Src = Op.getOperand(0);
3031 SDLoc DL(Op);
3033 // TODO: Don't scalarize on Evergreen?
3034 unsigned NElts = VT.getVectorNumElements();
3035 SmallVector<SDValue, 8> Args;
3036 DAG.ExtractVectorElements(Src, Args, 0, NElts);
3038 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
3039 for (unsigned I = 0; I < NElts; ++I)
3040 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
3042 return DAG.getBuildVector(VT, DL, Args);
3045 //===----------------------------------------------------------------------===//
3046 // Custom DAG optimizations
3047 //===----------------------------------------------------------------------===//
3049 static bool isU24(SDValue Op, SelectionDAG &DAG) {
3050 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
3053 static bool isI24(SDValue Op, SelectionDAG &DAG) {
3054 EVT VT = Op.getValueType();
3055 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
3056 // as unsigned 24-bit values.
3057 AMDGPUTargetLowering::numBitsSigned(Op, DAG) <= 24;
3060 static SDValue simplifyMul24(SDNode *Node24,
3061 TargetLowering::DAGCombinerInfo &DCI) {
3062 SelectionDAG &DAG = DCI.DAG;
3063 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3064 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN;
3066 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0);
3067 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1);
3068 unsigned NewOpcode = Node24->getOpcode();
3069 if (IsIntrin) {
3070 unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue();
3071 switch (IID) {
3072 case Intrinsic::amdgcn_mul_i24:
3073 NewOpcode = AMDGPUISD::MUL_I24;
3074 break;
3075 case Intrinsic::amdgcn_mul_u24:
3076 NewOpcode = AMDGPUISD::MUL_U24;
3077 break;
3078 case Intrinsic::amdgcn_mulhi_i24:
3079 NewOpcode = AMDGPUISD::MULHI_I24;
3080 break;
3081 case Intrinsic::amdgcn_mulhi_u24:
3082 NewOpcode = AMDGPUISD::MULHI_U24;
3083 break;
3084 default:
3085 llvm_unreachable("Expected 24-bit mul intrinsic");
3089 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24);
3091 // First try to simplify using SimplifyMultipleUseDemandedBits which allows
3092 // the operands to have other uses, but will only perform simplifications that
3093 // involve bypassing some nodes for this user.
3094 SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG);
3095 SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG);
3096 if (DemandedLHS || DemandedRHS)
3097 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(),
3098 DemandedLHS ? DemandedLHS : LHS,
3099 DemandedRHS ? DemandedRHS : RHS);
3101 // Now try SimplifyDemandedBits which can simplify the nodes used by our
3102 // operands if this node is the only user.
3103 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI))
3104 return SDValue(Node24, 0);
3105 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI))
3106 return SDValue(Node24, 0);
3108 return SDValue();
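// Illustrative sketch of what the demanded-bits queries above enable: given
// IR along the lines of
//   %m = and i32 %x, 16777215            ; 0xffffff
//   %r = call i32 @llvm.amdgcn.mul.u24(i32 %m, i32 %y)
// only the low 24 bits of %m are demanded, so SimplifyMultipleUseDemandedBits
// can rewrite the MUL_U24 to read %x directly, and the now-unused mask can be
// cleaned up by later combines.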
3111 template <typename IntTy>
3112 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
3113 uint32_t Width, const SDLoc &DL) {
3114 if (Width + Offset < 32) {
3115 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
3116 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
3117 return DAG.getConstant(Result, DL, MVT::i32);
3120 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
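// Worked example (illustrative): constantFoldBFE<uint32_t>(DAG, 0x12345678,
// /*Offset=*/8, /*Width=*/8, DL) takes the Width + Offset < 32 path:
// Shl = 0x12345678 << 16 = 0x56780000 and Result = 0x56780000 >> 24 = 0x56,
// i.e. bits [15:8] of the source. With IntTy = int32_t the final shift is
// arithmetic, so the extracted field is sign extended instead.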
3123 static bool hasVolatileUser(SDNode *Val) {
3124 for (SDNode *U : Val->uses()) {
3125 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
3126 if (M->isVolatile())
3127 return true;
3131 return false;
3134 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
3135 // i32 vectors are the canonical memory type.
3136 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT))
3137 return false;
3139 if (!VT.isByteSized())
3140 return false;
3142 unsigned Size = VT.getStoreSize();
3144 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector())
3145 return false;
3147 if (Size == 3 || (Size > 4 && (Size % 4 != 0)))
3148 return false;
3150 return true;
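// Illustrative consequences of the checks above (exact results depend on
// which types the subtarget marks legal): a v4i8 access (4 bytes, but a
// vector) is combined and later bitcast to i32, while v3i16 (6 bytes, not a
// multiple of 4) and any non-byte-sized type are left untouched.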
3153 // Replace load of an illegal type with a load of a bitcast to a friendlier
3154 // type.
3155 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
3156 DAGCombinerInfo &DCI) const {
3157 if (!DCI.isBeforeLegalize())
3158 return SDValue();
3160 LoadSDNode *LN = cast<LoadSDNode>(N);
3161 if (!LN->isSimple() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN))
3162 return SDValue();
3164 SDLoc SL(N);
3165 SelectionDAG &DAG = DCI.DAG;
3166 EVT VT = LN->getMemoryVT();
3168 unsigned Size = VT.getStoreSize();
3169 Align Alignment = LN->getAlign();
3170 if (Alignment < Size && isTypeLegal(VT)) {
3171 unsigned IsFast;
3172 unsigned AS = LN->getAddressSpace();
3174 // Expand unaligned loads earlier than legalization. Due to visitation order
3175 // problems during legalization, the emitted instructions to pack and unpack
3176 // the bytes again are not eliminated in the case of an unaligned copy.
3177 if (!allowsMisalignedMemoryAccesses(
3178 VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
3179 if (VT.isVector())
3180 return SplitVectorLoad(SDValue(LN, 0), DAG);
3182 SDValue Ops[2];
3183 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
3185 return DAG.getMergeValues(Ops, SDLoc(N));
3188 if (!IsFast)
3189 return SDValue();
3192 if (!shouldCombineMemoryType(VT))
3193 return SDValue();
3195 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3197 SDValue NewLoad
3198 = DAG.getLoad(NewVT, SL, LN->getChain(),
3199 LN->getBasePtr(), LN->getMemOperand());
3201 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad);
3202 DCI.CombineTo(N, BC, NewLoad.getValue(1));
3203 return SDValue(N, 0);
3206 // Replace store of an illegal type with a store of a bitcast to a friendlier
3207 // type.
3208 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
3209 DAGCombinerInfo &DCI) const {
3210 if (!DCI.isBeforeLegalize())
3211 return SDValue();
3213 StoreSDNode *SN = cast<StoreSDNode>(N);
3214 if (!SN->isSimple() || !ISD::isNormalStore(SN))
3215 return SDValue();
3217 EVT VT = SN->getMemoryVT();
3218 unsigned Size = VT.getStoreSize();
3220 SDLoc SL(N);
3221 SelectionDAG &DAG = DCI.DAG;
3222 Align Alignment = SN->getAlign();
3223 if (Alignment < Size && isTypeLegal(VT)) {
3224 unsigned IsFast;
3225 unsigned AS = SN->getAddressSpace();
3227 // Expand unaligned stores earlier than legalization. Due to visitation
3228 // order problems during legalization, the emitted instructions to pack and
3229 // unpack the bytes again are not eliminated in the case of an unaligned
3230 // copy.
3231 if (!allowsMisalignedMemoryAccesses(
3232 VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
3233 if (VT.isVector())
3234 return SplitVectorStore(SDValue(SN, 0), DAG);
3236 return expandUnalignedStore(SN, DAG);
3239 if (!IsFast)
3240 return SDValue();
3243 if (!shouldCombineMemoryType(VT))
3244 return SDValue();
3246 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
3247 SDValue Val = SN->getValue();
3249 //DCI.AddToWorklist(Val.getNode());
3251 bool OtherUses = !Val.hasOneUse();
3252 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
3253 if (OtherUses) {
3254 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
3255 DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
3258 return DAG.getStore(SN->getChain(), SL, CastVal,
3259 SN->getBasePtr(), SN->getMemOperand());
3262 // FIXME: This should go in generic DAG combiner with an isTruncateFree check,
3263 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
3264 // issues.
3265 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
3266 DAGCombinerInfo &DCI) const {
3267 SelectionDAG &DAG = DCI.DAG;
3268 SDValue N0 = N->getOperand(0);
3270 // (vt2 (assertzext (truncate vt0:x), vt1)) ->
3271 // (vt2 (truncate (assertzext vt0:x, vt1)))
3272 if (N0.getOpcode() == ISD::TRUNCATE) {
3273 SDValue N1 = N->getOperand(1);
3274 EVT ExtVT = cast<VTSDNode>(N1)->getVT();
3275 SDLoc SL(N);
3277 SDValue Src = N0.getOperand(0);
3278 EVT SrcVT = Src.getValueType();
3279 if (SrcVT.bitsGE(ExtVT)) {
3280 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
3281 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
3285 return SDValue();
3288 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine(
3289 SDNode *N, DAGCombinerInfo &DCI) const {
3290 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3291 switch (IID) {
3292 case Intrinsic::amdgcn_mul_i24:
3293 case Intrinsic::amdgcn_mul_u24:
3294 case Intrinsic::amdgcn_mulhi_i24:
3295 case Intrinsic::amdgcn_mulhi_u24:
3296 return simplifyMul24(N, DCI);
3297 case Intrinsic::amdgcn_fract:
3298 case Intrinsic::amdgcn_rsq:
3299 case Intrinsic::amdgcn_rcp_legacy:
3300 case Intrinsic::amdgcn_rsq_legacy:
3301 case Intrinsic::amdgcn_rsq_clamp:
3302 case Intrinsic::amdgcn_ldexp: {
3303 // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted
3304 SDValue Src = N->getOperand(1);
3305 return Src.isUndef() ? Src : SDValue();
3307 default:
3308 return SDValue();
3312 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the
3313 /// binary operation \p Opc to it with the corresponding constant operands.
3314 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
3315 DAGCombinerInfo &DCI, const SDLoc &SL,
3316 unsigned Opc, SDValue LHS,
3317 uint32_t ValLo, uint32_t ValHi) const {
3318 SelectionDAG &DAG = DCI.DAG;
3319 SDValue Lo, Hi;
3320 std::tie(Lo, Hi) = split64BitValue(LHS, DAG);
3322 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
3323 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);
3325 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
3326 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);
3328 // Re-visit the ands. It's possible we eliminated one of them and it could
3329 // simplify the vector.
3330 DCI.AddToWorklist(Lo.getNode());
3331 DCI.AddToWorklist(Hi.getNode());
3333 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
3334 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
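// Illustrative sketch: splitting (and i64:x, 0x00000000ffffffff) this way
// yields v2i32 (build_vector (and lo_32(x), 0xffffffff), (and hi_32(x), 0)),
// bitcast back to i64; the high half can then fold to zero when the queued
// nodes are revisited.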
3337 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
3338 DAGCombinerInfo &DCI) const {
3339 EVT VT = N->getValueType(0);
3341 ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3342 if (!RHS)
3343 return SDValue();
3345 SDValue LHS = N->getOperand(0);
3346 unsigned RHSVal = RHS->getZExtValue();
3347 if (!RHSVal)
3348 return LHS;
3350 SDLoc SL(N);
3351 SelectionDAG &DAG = DCI.DAG;
3353 switch (LHS->getOpcode()) {
3354 default:
3355 break;
3356 case ISD::ZERO_EXTEND:
3357 case ISD::SIGN_EXTEND:
3358 case ISD::ANY_EXTEND: {
3359 SDValue X = LHS->getOperand(0);
3361 if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
3362 isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
3363 // Prefer build_vector as the canonical form if packed types are legal.
3364 // (shl ([asz]ext i16:x), 16) -> build_vector 0, x
3365 SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
3366 { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
3367 return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
3370 // shl (ext x) => zext (shl x), if shift does not overflow int
3371 if (VT != MVT::i64)
3372 break;
3373 KnownBits Known = DAG.computeKnownBits(X);
3374 unsigned LZ = Known.countMinLeadingZeros();
3375 if (LZ < RHSVal)
3376 break;
3377 EVT XVT = X.getValueType();
3378 SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
3379 return DAG.getZExtOrTrunc(Shl, SL, VT);
3383 if (VT != MVT::i64)
3384 return SDValue();
3386 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
3388 // On some subtargets, 64-bit shift is a quarter rate instruction. In the
3389 // common case, splitting this into a move and a 32-bit shift is faster and
3390 // the same code size.
3391 if (RHSVal < 32)
3392 return SDValue();
3394 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
3396 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
3397 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
3399 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3401 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
3402 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
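// Worked example (illustrative): (shl i64:x, 35) becomes
// (bitcast (build_vector 0, (shl (trunc x to i32), 3)) to i64), i.e. the low
// 32 result bits are zero and the high 32 bits are lo_32(x) << 3, so no
// 64-bit shift is needed on subtargets where it is slower.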
3405 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
3406 DAGCombinerInfo &DCI) const {
3407 if (N->getValueType(0) != MVT::i64)
3408 return SDValue();
3410 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3411 if (!RHS)
3412 return SDValue();
3414 SelectionDAG &DAG = DCI.DAG;
3415 SDLoc SL(N);
3416 unsigned RHSVal = RHS->getZExtValue();
3418 // (sra i64:x, 32) -> build_pair hi_32(x), (sra hi_32(x), 31)
3419 if (RHSVal == 32) {
3420 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3421 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3422 DAG.getConstant(31, SL, MVT::i32));
3424 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
3425 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3428 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
3429 if (RHSVal == 63) {
3430 SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
3431 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
3432 DAG.getConstant(31, SL, MVT::i32));
3433 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
3434 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
3437 return SDValue();
3440 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
3441 DAGCombinerInfo &DCI) const {
3442 auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
3443 if (!RHS)
3444 return SDValue();
3446 EVT VT = N->getValueType(0);
3447 SDValue LHS = N->getOperand(0);
3448 unsigned ShiftAmt = RHS->getZExtValue();
3449 SelectionDAG &DAG = DCI.DAG;
3450 SDLoc SL(N);
3452 // fold (srl (and x, c1 << c2), c2) -> (and (srl x, c2), c1)
3453 // this improves the ability to match BFE patterns in isel.
3454 if (LHS.getOpcode() == ISD::AND) {
3455 if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
3456 unsigned MaskIdx, MaskLen;
3457 if (Mask->getAPIntValue().isShiftedMask(MaskIdx, MaskLen) &&
3458 MaskIdx == ShiftAmt) {
3459 return DAG.getNode(
3460 ISD::AND, SL, VT,
3461 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
3462 DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
3467 if (VT != MVT::i64)
3468 return SDValue();
3470 if (ShiftAmt < 32)
3471 return SDValue();
3473 // srl i64:x, C for C >= 32
3474 // =>
3475 // build_pair (srl hi_32(x), C - 32), 0
3476 SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
3478 SDValue Hi = getHiHalf64(LHS, DAG);
3480 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
3481 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
3483 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
3485 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
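// Worked example (illustrative): (srl i64:x, 40) becomes
// (bitcast (build_vector (srl hi_32(x), 8), 0) to i64): the low 32 result
// bits are hi_32(x) >> 8 and the high 32 bits are zero.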
3488 SDValue AMDGPUTargetLowering::performTruncateCombine(
3489 SDNode *N, DAGCombinerInfo &DCI) const {
3490 SDLoc SL(N);
3491 SelectionDAG &DAG = DCI.DAG;
3492 EVT VT = N->getValueType(0);
3493 SDValue Src = N->getOperand(0);
3495 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
3496 if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
3497 SDValue Vec = Src.getOperand(0);
3498 if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
3499 SDValue Elt0 = Vec.getOperand(0);
3500 EVT EltVT = Elt0.getValueType();
3501 if (VT.getFixedSizeInBits() <= EltVT.getFixedSizeInBits()) {
3502 if (EltVT.isFloatingPoint()) {
3503 Elt0 = DAG.getNode(ISD::BITCAST, SL,
3504 EltVT.changeTypeToInteger(), Elt0);
3507 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
3512 // Equivalent of above for accessing the high element of a vector as an
3513 // integer operation.
3514 // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y)
3515 if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
3516 if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
3517 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
3518 SDValue BV = stripBitcast(Src.getOperand(0));
3519 if (BV.getOpcode() == ISD::BUILD_VECTOR &&
3520 BV.getValueType().getVectorNumElements() == 2) {
3521 SDValue SrcElt = BV.getOperand(1);
3522 EVT SrcEltVT = SrcElt.getValueType();
3523 if (SrcEltVT.isFloatingPoint()) {
3524 SrcElt = DAG.getNode(ISD::BITCAST, SL,
3525 SrcEltVT.changeTypeToInteger(), SrcElt);
3528 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
3534 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
3536 // i16 (trunc (srl i64:x, K)), K <= 16 ->
3537 // i16 (trunc (srl (i32 (trunc x)), K))
3538 if (VT.getScalarSizeInBits() < 32) {
3539 EVT SrcVT = Src.getValueType();
3540 if (SrcVT.getScalarSizeInBits() > 32 &&
3541 (Src.getOpcode() == ISD::SRL ||
3542 Src.getOpcode() == ISD::SRA ||
3543 Src.getOpcode() == ISD::SHL)) {
3544 SDValue Amt = Src.getOperand(1);
3545 KnownBits Known = DAG.computeKnownBits(Amt);
3547 // - For left shifts, do the transform as long as the shift
3548 // amount is still legal for i32, so when ShiftAmt < 32 (<= 31)
3549 // - For right shift, do it if ShiftAmt <= (32 - Size) to avoid
3550 // losing information stored in the high bits when truncating.
3551 const unsigned MaxCstSize =
3552 (Src.getOpcode() == ISD::SHL) ? 31 : (32 - VT.getScalarSizeInBits());
3553 if (Known.getMaxValue().ule(MaxCstSize)) {
3554 EVT MidVT = VT.isVector() ?
3555 EVT::getVectorVT(*DAG.getContext(), MVT::i32,
3556 VT.getVectorNumElements()) : MVT::i32;
3558 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
3559 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
3560 Src.getOperand(0));
3561 DCI.AddToWorklist(Trunc.getNode());
3563 if (Amt.getValueType() != NewShiftVT) {
3564 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
3565 DCI.AddToWorklist(Amt.getNode());
3568 SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
3569 Trunc, Amt);
3570 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
3575 return SDValue();
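// Worked example (illustrative) for the shift-narrowing case above: in
// i16 (trunc (srl i64:x, 8)) the shift amount 8 is <= 32 - 16, so it becomes
// i16 (trunc (srl (i32 (trunc x)), 8)); the bits that survive the
// truncation, [23:8] of x, are all available in the low 32 bits.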
3578 // We need to specifically handle i64 mul here to avoid unnecessary conversion
3579 // instructions. If we only match on the legalized i64 mul expansion,
3580 // SimplifyDemandedBits will be unable to remove them because there will be
3581 // multiple uses due to the separate mul + mulh[su].
3582 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
3583 SDValue N0, SDValue N1, unsigned Size, bool Signed) {
3584 if (Size <= 32) {
3585 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3586 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
3589 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
3590 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;
3592 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
3593 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
3595 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, MulLo, MulHi);
3598 /// If \p V is an add of a constant 1, returns the other operand. Otherwise
3599 /// return SDValue().
3600 static SDValue getAddOneOp(const SDNode *V) {
3601 if (V->getOpcode() != ISD::ADD)
3602 return SDValue();
3604 auto *C = dyn_cast<ConstantSDNode>(V->getOperand(1));
3605 return C && C->isOne() ? V->getOperand(0) : SDValue();
3608 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
3609 DAGCombinerInfo &DCI) const {
3610 EVT VT = N->getValueType(0);
3612 // Don't generate 24-bit multiplies on values that are in SGPRs, since
3613 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3614 // unnecessarily). isDivergent() is used as an approximation of whether the
3615 // value is in an SGPR.
3616 if (!N->isDivergent())
3617 return SDValue();
3619 unsigned Size = VT.getSizeInBits();
3620 if (VT.isVector() || Size > 64)
3621 return SDValue();
3623 SelectionDAG &DAG = DCI.DAG;
3624 SDLoc DL(N);
3626 SDValue N0 = N->getOperand(0);
3627 SDValue N1 = N->getOperand(1);
3629 // Undo the InstCombine canonicalization X * (Y + 1) -> X * Y + X to enable mad
3630 // matching.
3632 // mul x, (add y, 1) -> add (mul x, y), x
3633 auto IsFoldableAdd = [](SDValue V) -> SDValue {
3634 SDValue AddOp = getAddOneOp(V.getNode());
3635 if (!AddOp)
3636 return SDValue();
3638 if (V.hasOneUse() || all_of(V->uses(), [](const SDNode *U) -> bool {
3639 return U->getOpcode() == ISD::MUL;
3641 return AddOp;
3643 return SDValue();
3646 // FIXME: The selection pattern is not properly checking for commuted
3647 // operands, so we have to place the mul in the LHS
3648 if (SDValue MulOper = IsFoldableAdd(N0)) {
3649 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N1, MulOper);
3650 return DAG.getNode(ISD::ADD, DL, VT, MulVal, N1);
3653 if (SDValue MulOper = IsFoldableAdd(N1)) {
3654 SDValue MulVal = DAG.getNode(N->getOpcode(), DL, VT, N0, MulOper);
3655 return DAG.getNode(ISD::ADD, DL, VT, MulVal, N0);
3658 // Skip if already mul24.
3659 if (N->getOpcode() != ISD::MUL)
3660 return SDValue();
3662 // There are i16 integer mul/mad.
3663 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
3664 return SDValue();
3666 // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3667 // in the source into any_extends if the result of the mul is truncated. Since
3668 // we can assume the high bits are whatever we want, use the underlying value
3669 // to keep the unknown high bits from interfering.
3670 if (N0.getOpcode() == ISD::ANY_EXTEND)
3671 N0 = N0.getOperand(0);
3673 if (N1.getOpcode() == ISD::ANY_EXTEND)
3674 N1 = N1.getOperand(0);
3676 SDValue Mul;
3678 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3679 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3680 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3681 Mul = getMul24(DAG, DL, N0, N1, Size, false);
3682 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3683 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3684 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3685 Mul = getMul24(DAG, DL, N0, N1, Size, true);
3686 } else {
3687 return SDValue();
3690 // We need to use sext even for MUL_U24, because MUL_U24 is used
3691 // for signed multiply of 8 and 16-bit types.
3692 return DAG.getSExtOrTrunc(Mul, DL, VT);
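// Illustrative sketch of the two rewrites above: (mul i32:x, (add i32:y, 1))
// is first un-canonicalized to (add (mul x, y), x) so a mad can be matched
// later, and a divergent i32 multiply whose operands are known to fit in 24
// bits (e.g. both produced by (and ..., 0xffffff)) becomes MUL_U24.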
3695 SDValue
3696 AMDGPUTargetLowering::performMulLoHiCombine(SDNode *N,
3697 DAGCombinerInfo &DCI) const {
3698 if (N->getValueType(0) != MVT::i32)
3699 return SDValue();
3701 SelectionDAG &DAG = DCI.DAG;
3702 SDLoc DL(N);
3704 SDValue N0 = N->getOperand(0);
3705 SDValue N1 = N->getOperand(1);
3707 // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
3708 // in the source into any_extends if the result of the mul is truncated. Since
3709 // we can assume the high bits are whatever we want, use the underlying value
3710 // to keep the unknown high bits from interfering.
3711 if (N0.getOpcode() == ISD::ANY_EXTEND)
3712 N0 = N0.getOperand(0);
3713 if (N1.getOpcode() == ISD::ANY_EXTEND)
3714 N1 = N1.getOperand(0);
3716 // Try to use two fast 24-bit multiplies (one for each half of the result)
3717 // instead of one slow extending multiply.
3718 unsigned LoOpcode, HiOpcode;
3719 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
3720 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3721 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3722 LoOpcode = AMDGPUISD::MUL_U24;
3723 HiOpcode = AMDGPUISD::MULHI_U24;
3724 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
3725 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3726 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3727 LoOpcode = AMDGPUISD::MUL_I24;
3728 HiOpcode = AMDGPUISD::MULHI_I24;
3729 } else {
3730 return SDValue();
3733 SDValue Lo = DAG.getNode(LoOpcode, DL, MVT::i32, N0, N1);
3734 SDValue Hi = DAG.getNode(HiOpcode, DL, MVT::i32, N0, N1);
3735 DCI.CombineTo(N, Lo, Hi);
3736 return SDValue(N, 0);
3739 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
3740 DAGCombinerInfo &DCI) const {
3741 EVT VT = N->getValueType(0);
3743 if (!Subtarget->hasMulI24() || VT.isVector())
3744 return SDValue();
3746 // Don't generate 24-bit multiplies on values that are in SGPRs, since
3747 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3748 // unnecessarily). isDivergent() is used as an approximation of whether the
3749 // value is in an SGPR.
3750 // This doesn't apply if no s_mul_hi is available (since we'll end up with a
3751 // valu op anyway)
3752 if (Subtarget->hasSMulHi() && !N->isDivergent())
3753 return SDValue();
3755 SelectionDAG &DAG = DCI.DAG;
3756 SDLoc DL(N);
3758 SDValue N0 = N->getOperand(0);
3759 SDValue N1 = N->getOperand(1);
3761 if (!isI24(N0, DAG) || !isI24(N1, DAG))
3762 return SDValue();
3764 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
3765 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
3767 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
3768 DCI.AddToWorklist(Mulhi.getNode());
3769 return DAG.getSExtOrTrunc(Mulhi, DL, VT);
3772 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
3773 DAGCombinerInfo &DCI) const {
3774 EVT VT = N->getValueType(0);
3776 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
3777 return SDValue();
3779 // Don't generate 24-bit multiplies on values that are in SGPRs, since
3780 // we only have a 32-bit scalar multiply (avoid values being moved to VGPRs
3781 // unnecessarily). isDivergent() is used as an approximation of whether the
3782 // value is in an SGPR.
3783 // This doesn't apply if no s_mul_hi is available (since we'll end up with a
3784 // valu op anyway)
3785 if (Subtarget->hasSMulHi() && !N->isDivergent())
3786 return SDValue();
3788 SelectionDAG &DAG = DCI.DAG;
3789 SDLoc DL(N);
3791 SDValue N0 = N->getOperand(0);
3792 SDValue N1 = N->getOperand(1);
3794 if (!isU24(N0, DAG) || !isU24(N1, DAG))
3795 return SDValue();
3797 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
3798 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
3800 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
3801 DCI.AddToWorklist(Mulhi.getNode());
3802 return DAG.getZExtOrTrunc(Mulhi, DL, VT);
3805 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
3806 SDValue Op,
3807 const SDLoc &DL,
3808 unsigned Opc) const {
3809 EVT VT = Op.getValueType();
3810 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
3811 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
3812 LegalVT != MVT::i16))
3813 return SDValue();
3815 if (VT != MVT::i32)
3816 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);
3818 SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
3819 if (VT != MVT::i32)
3820 FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);
3822 return FFBX;
3825 // The native instructions return -1 on 0 input. Optimize out a select that
3826 // produces -1 on 0.
3828 // TODO: If zero is not undef, we could also do this if the output is compared
3829 // against the bitwidth.
3831 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
3832 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
3833 SDValue LHS, SDValue RHS,
3834 DAGCombinerInfo &DCI) const {
3835 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3836 if (!CmpRhs || !CmpRhs->isZero())
3837 return SDValue();
3839 SelectionDAG &DAG = DCI.DAG;
3840 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
3841 SDValue CmpLHS = Cond.getOperand(0);
3843 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
3844 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x
3845 if (CCOpcode == ISD::SETEQ &&
3846 (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
3847 RHS.getOperand(0) == CmpLHS && isAllOnesConstant(LHS)) {
3848 unsigned Opc =
3849 isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3850 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3853 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
3854 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x
3855 if (CCOpcode == ISD::SETNE &&
3856 (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
3857 LHS.getOperand(0) == CmpLHS && isAllOnesConstant(RHS)) {
3858 unsigned Opc =
3859 isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 : AMDGPUISD::FFBH_U32;
3861 return getFFBX_U32(DAG, CmpLHS, SL, Opc);
3864 return SDValue();
3867 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
3868 unsigned Op,
3869 const SDLoc &SL,
3870 SDValue Cond,
3871 SDValue N1,
3872 SDValue N2) {
3873 SelectionDAG &DAG = DCI.DAG;
3874 EVT VT = N1.getValueType();
3876 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
3877 N1.getOperand(0), N2.getOperand(0));
3878 DCI.AddToWorklist(NewSelect.getNode());
3879 return DAG.getNode(Op, SL, VT, NewSelect);
3882 // Pull a free FP operation out of a select so it may fold into uses.
3884 // select c, (fneg x), (fneg y) -> fneg (select c, x, y)
3885 // select c, (fneg x), k -> fneg (select c, x, (fneg k))
3887 // select c, (fabs x), (fabs y) -> fabs (select c, x, y)
3888 // select c, (fabs x), +k -> fabs (select c, x, k)
3889 SDValue
3890 AMDGPUTargetLowering::foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
3891 SDValue N) const {
3892 SelectionDAG &DAG = DCI.DAG;
3893 SDValue Cond = N.getOperand(0);
3894 SDValue LHS = N.getOperand(1);
3895 SDValue RHS = N.getOperand(2);
3897 EVT VT = N.getValueType();
3898 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
3899 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
3900 if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
3901 return SDValue();
3903 return distributeOpThroughSelect(DCI, LHS.getOpcode(),
3904 SDLoc(N), Cond, LHS, RHS);
3907 bool Inv = false;
3908 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
3909 std::swap(LHS, RHS);
3910 Inv = true;
3913 // TODO: Support vector constants.
3914 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
3915 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS &&
3916 !selectSupportsSourceMods(N.getNode())) {
3917 SDLoc SL(N);
3918 // If one side is an fneg/fabs and the other is a constant, we can push the
3919 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
3920 SDValue NewLHS = LHS.getOperand(0);
3921 SDValue NewRHS = RHS;
3923 // Careful: if the neg can be folded up, don't try to pull it back down.
3924 bool ShouldFoldNeg = true;
3926 if (NewLHS.hasOneUse()) {
3927 unsigned Opc = NewLHS.getOpcode();
3928 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(NewLHS.getNode()))
3929 ShouldFoldNeg = false;
3930 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
3931 ShouldFoldNeg = false;
3934 if (ShouldFoldNeg) {
3935 if (LHS.getOpcode() == ISD::FABS && CRHS->isNegative())
3936 return SDValue();
3938 // We're going to be forced to use a source modifier anyway, so there's no
3939 // point in pulling the negate out unless we can get a size reduction by
3940 // negating the constant.
3942 // TODO: Generalize to use getCheaperNegatedExpression which doesn't know
3943 // about cheaper constants.
3944 if (NewLHS.getOpcode() == ISD::FABS &&
3945 getConstantNegateCost(CRHS) != NegatibleCost::Cheaper)
3946 return SDValue();
3948 if (!AMDGPUTargetLowering::allUsesHaveSourceMods(N.getNode()))
3949 return SDValue();
3951 if (LHS.getOpcode() == ISD::FNEG)
3952 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
3954 if (Inv)
3955 std::swap(NewLHS, NewRHS);
3957 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
3958 Cond, NewLHS, NewRHS);
3959 DCI.AddToWorklist(NewSelect.getNode());
3960 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
3964 return SDValue();
3967 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
3968 DAGCombinerInfo &DCI) const {
3969 if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
3970 return Folded;
3972 SDValue Cond = N->getOperand(0);
3973 if (Cond.getOpcode() != ISD::SETCC)
3974 return SDValue();
3976 EVT VT = N->getValueType(0);
3977 SDValue LHS = Cond.getOperand(0);
3978 SDValue RHS = Cond.getOperand(1);
3979 SDValue CC = Cond.getOperand(2);
3981 SDValue True = N->getOperand(1);
3982 SDValue False = N->getOperand(2);
3984 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
3985 SelectionDAG &DAG = DCI.DAG;
3986 if (DAG.isConstantValueOfAnyType(True) &&
3987 !DAG.isConstantValueOfAnyType(False)) {
3988 // Swap cmp + select pair to move constant to false input.
3989 // This will allow using VOPC cndmasks more often.
3990 // select (setcc x, y), k, x -> select (setccinv x, y), x, k
3992 SDLoc SL(N);
3993 ISD::CondCode NewCC =
3994 getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());
3996 SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
3997 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
4000 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
4001 SDValue MinMax
4002 = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
4003 // Revisit this node so we can catch min3/max3/med3 patterns.
4004 //DCI.AddToWorklist(MinMax.getNode());
4005 return MinMax;
4009 // There's no reason to not do this if the condition has other uses.
4010 return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
4013 static bool isInv2Pi(const APFloat &APF) {
4014 static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
4015 static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
4016 static const APFloat KF64(APFloat::IEEEdouble(), APInt(64, 0x3fc45f306dc9c882));
4018 return APF.bitwiseIsEqual(KF16) ||
4019 APF.bitwiseIsEqual(KF32) ||
4020 APF.bitwiseIsEqual(KF64);
4023 // 0 and 1.0 / (0.5 * pi) do not have inline immediates, so there is an
4024 // additional cost to negate them.
4025 TargetLowering::NegatibleCost
4026 AMDGPUTargetLowering::getConstantNegateCost(const ConstantFPSDNode *C) const {
4027 if (C->isZero())
4028 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4030 if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
4031 return C->isNegative() ? NegatibleCost::Cheaper : NegatibleCost::Expensive;
4033 return NegatibleCost::Neutral;
4036 bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
4037 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
4038 return getConstantNegateCost(C) == NegatibleCost::Expensive;
4039 return false;
4042 bool AMDGPUTargetLowering::isConstantCheaperToNegate(SDValue N) const {
4043 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
4044 return getConstantNegateCost(C) == NegatibleCost::Cheaper;
4045 return false;
4048 static unsigned inverseMinMax(unsigned Opc) {
4049 switch (Opc) {
4050 case ISD::FMAXNUM:
4051 return ISD::FMINNUM;
4052 case ISD::FMINNUM:
4053 return ISD::FMAXNUM;
4054 case ISD::FMAXNUM_IEEE:
4055 return ISD::FMINNUM_IEEE;
4056 case ISD::FMINNUM_IEEE:
4057 return ISD::FMAXNUM_IEEE;
4058 case AMDGPUISD::FMAX_LEGACY:
4059 return AMDGPUISD::FMIN_LEGACY;
4060 case AMDGPUISD::FMIN_LEGACY:
4061 return AMDGPUISD::FMAX_LEGACY;
4062 default:
4063 llvm_unreachable("invalid min/max opcode");
4067 /// \return true if it's profitable to try to push an fneg into its source
4068 /// instruction.
4069 bool AMDGPUTargetLowering::shouldFoldFNegIntoSrc(SDNode *N, SDValue N0) {
4070 // If the input has multiple uses and we can either fold the negate down, or
4071 // the other uses cannot, give up. This both prevents unprofitable
4072 // transformations and infinite loops: we won't repeatedly try to fold around
4073 // a negate that has no 'good' form.
4074 if (N0.hasOneUse()) {
4075 // This may be able to fold into the source, but at a code size cost. Don't
4076 // fold if the fold into the user is free.
4077 if (allUsesHaveSourceMods(N, 0))
4078 return false;
4079 } else {
4080 if (fnegFoldsIntoOp(N0.getNode()) &&
4081 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
4082 return false;
4085 return true;
4088 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
4089 DAGCombinerInfo &DCI) const {
4090 SelectionDAG &DAG = DCI.DAG;
4091 SDValue N0 = N->getOperand(0);
4092 EVT VT = N->getValueType(0);
4094 unsigned Opc = N0.getOpcode();
4096 if (!shouldFoldFNegIntoSrc(N, N0))
4097 return SDValue();
4099 SDLoc SL(N);
4100 switch (Opc) {
4101 case ISD::FADD: {
4102 if (!mayIgnoreSignedZero(N0))
4103 return SDValue();
4105 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
4106 SDValue LHS = N0.getOperand(0);
4107 SDValue RHS = N0.getOperand(1);
4109 if (LHS.getOpcode() != ISD::FNEG)
4110 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
4111 else
4112 LHS = LHS.getOperand(0);
4114 if (RHS.getOpcode() != ISD::FNEG)
4115 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4116 else
4117 RHS = RHS.getOperand(0);
4119 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
4120 if (Res.getOpcode() != ISD::FADD)
4121 return SDValue(); // Op got folded away.
4122 if (!N0.hasOneUse())
4123 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4124 return Res;
4126 case ISD::FMUL:
4127 case AMDGPUISD::FMUL_LEGACY: {
4128 // (fneg (fmul x, y)) -> (fmul x, (fneg y))
4129 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
4130 SDValue LHS = N0.getOperand(0);
4131 SDValue RHS = N0.getOperand(1);
4133 if (LHS.getOpcode() == ISD::FNEG)
4134 LHS = LHS.getOperand(0);
4135 else if (RHS.getOpcode() == ISD::FNEG)
4136 RHS = RHS.getOperand(0);
4137 else
4138 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4140 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
4141 if (Res.getOpcode() != Opc)
4142 return SDValue(); // Op got folded away.
4143 if (!N0.hasOneUse())
4144 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4145 return Res;
4147 case ISD::FMA:
4148 case ISD::FMAD: {
4149 // TODO: handle llvm.amdgcn.fma.legacy
4150 if (!mayIgnoreSignedZero(N0))
4151 return SDValue();
4153 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
4154 SDValue LHS = N0.getOperand(0);
4155 SDValue MHS = N0.getOperand(1);
4156 SDValue RHS = N0.getOperand(2);
4158 if (LHS.getOpcode() == ISD::FNEG)
4159 LHS = LHS.getOperand(0);
4160 else if (MHS.getOpcode() == ISD::FNEG)
4161 MHS = MHS.getOperand(0);
4162 else
4163 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);
4165 if (RHS.getOpcode() != ISD::FNEG)
4166 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4167 else
4168 RHS = RHS.getOperand(0);
4170 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
4171 if (Res.getOpcode() != Opc)
4172 return SDValue(); // Op got folded away.
4173 if (!N0.hasOneUse())
4174 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4175 return Res;
4177 case ISD::FMAXNUM:
4178 case ISD::FMINNUM:
4179 case ISD::FMAXNUM_IEEE:
4180 case ISD::FMINNUM_IEEE:
4181 case AMDGPUISD::FMAX_LEGACY:
4182 case AMDGPUISD::FMIN_LEGACY: {
4183 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
4184 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
4185 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
4186 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)
4188 SDValue LHS = N0.getOperand(0);
4189 SDValue RHS = N0.getOperand(1);
4191 // 0 doesn't have a negated inline immediate.
4192 // TODO: This constant check should be generalized to other operations.
4193 if (isConstantCostlierToNegate(RHS))
4194 return SDValue();
4196 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
4197 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
4198 unsigned Opposite = inverseMinMax(Opc);
4200 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
4201 if (Res.getOpcode() != Opposite)
4202 return SDValue(); // Op got folded away.
4203 if (!N0.hasOneUse())
4204 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
4205 return Res;
4207 case AMDGPUISD::FMED3: {
4208 SDValue Ops[3];
4209 for (unsigned I = 0; I < 3; ++I)
4210 Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I), N0->getFlags());
4212 SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
4213 if (Res.getOpcode() != AMDGPUISD::FMED3)
4214 return SDValue(); // Op got folded away.
4216 if (!N0.hasOneUse()) {
4217 SDValue Neg = DAG.getNode(ISD::FNEG, SL, VT, Res);
4218 DAG.ReplaceAllUsesWith(N0, Neg);
4220 for (SDNode *U : Neg->uses())
4221 DCI.AddToWorklist(U);
4224 return Res;
4226 case ISD::FP_EXTEND:
4227 case ISD::FTRUNC:
4228 case ISD::FRINT:
4229 case ISD::FNEARBYINT: // XXX - Should fround be handled?
4230 case ISD::FSIN:
4231 case ISD::FCANONICALIZE:
4232 case AMDGPUISD::RCP:
4233 case AMDGPUISD::RCP_LEGACY:
4234 case AMDGPUISD::RCP_IFLAG:
4235 case AMDGPUISD::SIN_HW: {
4236 SDValue CvtSrc = N0.getOperand(0);
4237 if (CvtSrc.getOpcode() == ISD::FNEG) {
4238 // (fneg (fp_extend (fneg x))) -> (fp_extend x)
4239 // (fneg (rcp (fneg x))) -> (rcp x)
4240 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
4243 if (!N0.hasOneUse())
4244 return SDValue();
4246 // (fneg (fp_extend x)) -> (fp_extend (fneg x))
4247 // (fneg (rcp x)) -> (rcp (fneg x))
4248 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
4249 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
4251 case ISD::FP_ROUND: {
4252 SDValue CvtSrc = N0.getOperand(0);
4254 if (CvtSrc.getOpcode() == ISD::FNEG) {
4255 // (fneg (fp_round (fneg x))) -> (fp_round x)
4256 return DAG.getNode(ISD::FP_ROUND, SL, VT,
4257 CvtSrc.getOperand(0), N0.getOperand(1));
4260 if (!N0.hasOneUse())
4261 return SDValue();
4263 // (fneg (fp_round x)) -> (fp_round (fneg x))
4264 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
4265 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
4267 case ISD::FP16_TO_FP: {
4268 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
4269 // f16, but legalization of f16 fneg ends up pulling it out of the source.
4270 // Put the fneg back as a legal source operation that can be matched later.
4271 SDLoc SL(N);
4273 SDValue Src = N0.getOperand(0);
4274 EVT SrcVT = Src.getValueType();
4276 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
4277 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
4278 DAG.getConstant(0x8000, SL, SrcVT));
4279 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
4281 case ISD::SELECT: {
4282 // fneg (select c, a, b) -> select c, (fneg a), (fneg b)
4283 // TODO: Invert conditions of foldFreeOpFromSelect
4284 return SDValue();
4286 case ISD::BITCAST: {
4287 SDLoc SL(N);
4288 SDValue BCSrc = N0.getOperand(0);
4289 if (BCSrc.getOpcode() == ISD::BUILD_VECTOR) {
4290 SDValue HighBits = BCSrc.getOperand(BCSrc.getNumOperands() - 1);
4291 if (HighBits.getValueType().getSizeInBits() != 32 ||
4292 !fnegFoldsIntoOp(HighBits.getNode()))
4293 return SDValue();
4295 // f64 fneg only really needs to operate on the high half of the
4296 // register, so try to force it to an f32 operation to help make use of
4297 // source modifiers.
4300 // fneg (f64 (bitcast (build_vector x, y))) ->
4301 // f64 (bitcast (build_vector (bitcast i32:x to f32),
4302 // (fneg (bitcast i32:y to f32))))
4304 SDValue CastHi = DAG.getNode(ISD::BITCAST, SL, MVT::f32, HighBits);
4305 SDValue NegHi = DAG.getNode(ISD::FNEG, SL, MVT::f32, CastHi);
4306 SDValue CastBack =
4307 DAG.getNode(ISD::BITCAST, SL, HighBits.getValueType(), NegHi);
4309 SmallVector<SDValue, 8> Ops(BCSrc->op_begin(), BCSrc->op_end());
4310 Ops.back() = CastBack;
4311 DCI.AddToWorklist(NegHi.getNode());
4312 SDValue Build =
4313 DAG.getNode(ISD::BUILD_VECTOR, SL, BCSrc.getValueType(), Ops);
4314 SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, Build);
4316 if (!N0.hasOneUse())
4317 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Result));
4318 return Result;
4321 if (BCSrc.getOpcode() == ISD::SELECT && VT == MVT::f32 &&
4322 BCSrc.hasOneUse()) {
4323 // fneg (bitcast (f32 (select cond, i32:lhs, i32:rhs))) ->
4324 // select cond, (bitcast i32:lhs to f32), (bitcast i32:rhs to f32)
4326 // TODO: Cast back result for multiple uses is beneficial in some cases.
4328 SDValue LHS =
4329 DAG.getNode(ISD::BITCAST, SL, MVT::f32, BCSrc.getOperand(1));
4330 SDValue RHS =
4331 DAG.getNode(ISD::BITCAST, SL, MVT::f32, BCSrc.getOperand(2));
4333 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, MVT::f32, LHS);
4334 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, MVT::f32, RHS);
4336 return DAG.getNode(ISD::SELECT, SL, MVT::f32, BCSrc.getOperand(0), NegLHS,
4337 NegRHS);
4340 return SDValue();
4342 default:
4343 return SDValue();
4347 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
4348 DAGCombinerInfo &DCI) const {
4349 SelectionDAG &DAG = DCI.DAG;
4350 SDValue N0 = N->getOperand(0);
4352 if (!N0.hasOneUse())
4353 return SDValue();
4355 switch (N0.getOpcode()) {
4356 case ISD::FP16_TO_FP: {
4357 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
4358 SDLoc SL(N);
4359 SDValue Src = N0.getOperand(0);
4360 EVT SrcVT = Src.getValueType();
4362 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
4363 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
4364 DAG.getConstant(0x7fff, SL, SrcVT));
4365 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
4367 default:
4368 return SDValue();
4372 SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
4373 DAGCombinerInfo &DCI) const {
4374 const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
4375 if (!CFP)
4376 return SDValue();
4378 // XXX - Should this flush denormals?
4379 const APFloat &Val = CFP->getValueAPF();
4380 APFloat One(Val.getSemantics(), "1.0");
4381 return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
4384 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
4385 DAGCombinerInfo &DCI) const {
4386 SelectionDAG &DAG = DCI.DAG;
4387 SDLoc DL(N);
4389 switch(N->getOpcode()) {
4390 default:
4391 break;
4392 case ISD::BITCAST: {
4393 EVT DestVT = N->getValueType(0);
4395 // Push casts through vector builds. This helps avoid emitting a large
4396 // number of copies when materializing floating point vector constants.
4398 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
4399 // vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
4400 if (DestVT.isVector()) {
4401 SDValue Src = N->getOperand(0);
4402 if (Src.getOpcode() == ISD::BUILD_VECTOR) {
4403 EVT SrcVT = Src.getValueType();
4404 unsigned NElts = DestVT.getVectorNumElements();
4406 if (SrcVT.getVectorNumElements() == NElts) {
4407 EVT DestEltVT = DestVT.getVectorElementType();
4409 SmallVector<SDValue, 8> CastedElts;
4410 SDLoc SL(N);
4411 for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
4412 SDValue Elt = Src.getOperand(I);
4413 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
4416 return DAG.getBuildVector(DestVT, SL, CastedElts);
4421 if (DestVT.getSizeInBits() != 64 || !DestVT.isVector())
4422 break;
4424 // Fold bitcasts of constants.
4426 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
4427 // TODO: Generalize and move to DAGCombiner
4428 SDValue Src = N->getOperand(0);
4429 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
4430 SDLoc SL(N);
4431 uint64_t CVal = C->getZExtValue();
4432 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4433 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4434 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4435 return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
4438 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
4439 const APInt &Val = C->getValueAPF().bitcastToAPInt();
4440 SDLoc SL(N);
4441 uint64_t CVal = Val.getZExtValue();
4442 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
4443 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
4444 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
4446 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
4449 break;
4451 case ISD::SHL: {
4452 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4453 break;
4455 return performShlCombine(N, DCI);
4457 case ISD::SRL: {
4458 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4459 break;
4461 return performSrlCombine(N, DCI);
4463 case ISD::SRA: {
4464 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
4465 break;
4467 return performSraCombine(N, DCI);
4469 case ISD::TRUNCATE:
4470 return performTruncateCombine(N, DCI);
4471 case ISD::MUL:
4472 return performMulCombine(N, DCI);
4473 case AMDGPUISD::MUL_U24:
4474 case AMDGPUISD::MUL_I24: {
4475 if (SDValue Simplified = simplifyMul24(N, DCI))
4476 return Simplified;
4477 return performMulCombine(N, DCI);
4479 case AMDGPUISD::MULHI_I24:
4480 case AMDGPUISD::MULHI_U24:
4481 return simplifyMul24(N, DCI);
4482 case ISD::SMUL_LOHI:
4483 case ISD::UMUL_LOHI:
4484 return performMulLoHiCombine(N, DCI);
4485 case ISD::MULHS:
4486 return performMulhsCombine(N, DCI);
4487 case ISD::MULHU:
4488 return performMulhuCombine(N, DCI);
4489 case ISD::SELECT:
4490 return performSelectCombine(N, DCI);
4491 case ISD::FNEG:
4492 return performFNegCombine(N, DCI);
4493 case ISD::FABS:
4494 return performFAbsCombine(N, DCI);
4495 case AMDGPUISD::BFE_I32:
4496 case AMDGPUISD::BFE_U32: {
4497 assert(!N->getValueType(0).isVector() &&
4498 "Vector handling of BFE not implemented");
4499 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
4500 if (!Width)
4501 break;
4503 uint32_t WidthVal = Width->getZExtValue() & 0x1f;
4504 if (WidthVal == 0)
4505 return DAG.getConstant(0, DL, MVT::i32);
4507 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
4508 if (!Offset)
4509 break;
4511 SDValue BitsFrom = N->getOperand(0);
4512 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
4514 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
4516 if (OffsetVal == 0) {
4517 // This is already sign / zero extended, so try to fold away extra BFEs.
4518 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
4520 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
4521 if (OpSignBits >= SignBits)
4522 return BitsFrom;
4524 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
4525 if (Signed) {
4526 // This is a sign_extend_inreg. Replace it to take advantage of existing
4527 // DAG Combines. If not eliminated, we will match back to BFE during
4528 // selection.
4530 // TODO: The sext_inreg of extended types ends up here, although we could
4531 // handle them in a single BFE.
4532 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
4533 DAG.getValueType(SmallVT));
4536 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
4539 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
4540 if (Signed) {
4541 return constantFoldBFE<int32_t>(DAG,
4542 CVal->getSExtValue(),
4543 OffsetVal,
4544 WidthVal,
4545 DL);
4548 return constantFoldBFE<uint32_t>(DAG,
4549 CVal->getZExtValue(),
4550 OffsetVal,
4551 WidthVal,
4552 DL);
4555 if ((OffsetVal + WidthVal) >= 32 &&
4556 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
4557 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
4558 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
4559 BitsFrom, ShiftVal);
4562 if (BitsFrom.hasOneUse()) {
4563 APInt Demanded = APInt::getBitsSet(32,
4564 OffsetVal,
4565 OffsetVal + WidthVal);
4567 KnownBits Known;
4568 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
4569 !DCI.isBeforeLegalizeOps());
4570 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4571 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
4572 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
4573 DCI.CommitTargetLoweringOpt(TLO);
4577 break;
4579 case ISD::LOAD:
4580 return performLoadCombine(N, DCI);
4581 case ISD::STORE:
4582 return performStoreCombine(N, DCI);
4583 case AMDGPUISD::RCP:
4584 case AMDGPUISD::RCP_IFLAG:
4585 return performRcpCombine(N, DCI);
4586 case ISD::AssertZext:
4587 case ISD::AssertSext:
4588 return performAssertSZExtCombine(N, DCI);
4589 case ISD::INTRINSIC_WO_CHAIN:
4590 return performIntrinsicWOChainCombine(N, DCI);
4592 return SDValue();
4595 //===----------------------------------------------------------------------===//
4596 // Helper functions
4597 //===----------------------------------------------------------------------===//
4599 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
4600 const TargetRegisterClass *RC,
4601 Register Reg, EVT VT,
4602 const SDLoc &SL,
4603 bool RawReg) const {
4604 MachineFunction &MF = DAG.getMachineFunction();
4605 MachineRegisterInfo &MRI = MF.getRegInfo();
4606 Register VReg;
4608 if (!MRI.isLiveIn(Reg)) {
4609 VReg = MRI.createVirtualRegister(RC);
4610 MRI.addLiveIn(Reg, VReg);
4611 } else {
4612 VReg = MRI.getLiveInVirtReg(Reg);
4615 if (RawReg)
4616 return DAG.getRegister(VReg, VT);
4618 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
4621 // This may be called multiple times, and nothing prevents creating multiple
4622 // objects at the same offset. See if we already defined this object.
4623 static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
4624 int64_t Offset) {
4625 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
4626 if (MFI.getObjectOffset(I) == Offset) {
4627 assert(MFI.getObjectSize(I) == Size);
4628 return I;
4632 return MFI.CreateFixedObject(Size, Offset, true);
4635 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
4636 EVT VT,
4637 const SDLoc &SL,
4638 int64_t Offset) const {
4639 MachineFunction &MF = DAG.getMachineFunction();
4640 MachineFrameInfo &MFI = MF.getFrameInfo();
4641 int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);
4643 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
4644 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);
4646 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, Align(4),
4647 MachineMemOperand::MODereferenceable |
4648 MachineMemOperand::MOInvariant);
4651 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
4652 const SDLoc &SL,
4653 SDValue Chain,
4654 SDValue ArgVal,
4655 int64_t Offset) const {
4656 MachineFunction &MF = DAG.getMachineFunction();
4657 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);
4658 const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
4660 SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
4661 // Stores to the argument stack area are relative to the stack pointer.
4662 SDValue SP =
4663 DAG.getCopyFromReg(Chain, SL, Info->getStackPtrOffsetReg(), MVT::i32);
4664 Ptr = DAG.getNode(ISD::ADD, SL, MVT::i32, SP, Ptr);
4665 SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, Align(4),
4666 MachineMemOperand::MODereferenceable);
4667 return Store;
4670 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
4671 const TargetRegisterClass *RC,
4672 EVT VT, const SDLoc &SL,
4673 const ArgDescriptor &Arg) const {
4674 assert(Arg && "Attempting to load missing argument");
4676 SDValue V = Arg.isRegister() ?
4677 CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
4678 loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
4680 if (!Arg.isMasked())
4681 return V;
4683 unsigned Mask = Arg.getMask();
4684 unsigned Shift = llvm::countr_zero<unsigned>(Mask);
4685 V = DAG.getNode(ISD::SRL, SL, VT, V,
4686 DAG.getShiftAmountConstant(Shift, VT, SL));
4687 return DAG.getNode(ISD::AND, SL, VT, V,
4688 DAG.getConstant(Mask >> Shift, SL, VT));
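// Illustrative sketch with a hypothetical mask value: if Arg is masked with
// Mask = 0x3ff0, then Shift = 4 and the loaded value is decoded as
// (and (srl V, 4), 0x3ff), extracting the 10-bit field packed at bits [13:4].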
4691 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4692 uint64_t ExplicitKernArgSize, const ImplicitParameter Param) const {
4693 unsigned ExplicitArgOffset = Subtarget->getExplicitKernelArgOffset();
4694 const Align Alignment = Subtarget->getAlignmentForImplicitArgPtr();
4695 uint64_t ArgOffset =
4696 alignTo(ExplicitKernArgSize, Alignment) + ExplicitArgOffset;
4697 switch (Param) {
4698 case FIRST_IMPLICIT:
4699 return ArgOffset;
4700 case PRIVATE_BASE:
4701 return ArgOffset + AMDGPU::ImplicitArg::PRIVATE_BASE_OFFSET;
4702 case SHARED_BASE:
4703 return ArgOffset + AMDGPU::ImplicitArg::SHARED_BASE_OFFSET;
4704 case QUEUE_PTR:
4705 return ArgOffset + AMDGPU::ImplicitArg::QUEUE_PTR_OFFSET;
4707 llvm_unreachable("unexpected implicit parameter type");
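// Illustrative sketch (the alignment and explicit-argument offset are
// subtarget dependent): with ExplicitKernArgSize = 36, an implicit-arg
// alignment of 8 and an explicit kernel argument offset of 0, FIRST_IMPLICIT
// is alignTo(36, 8) + 0 = 40, and QUEUE_PTR is 40 + QUEUE_PTR_OFFSET.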
4710 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
4711 const MachineFunction &MF, const ImplicitParameter Param) const {
4712 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
4713 return getImplicitParameterOffset(MFI->getExplicitKernArgSize(), Param);
4716 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
4718 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
4719 switch ((AMDGPUISD::NodeType)Opcode) {
4720 case AMDGPUISD::FIRST_NUMBER: break;
4721 // AMDIL DAG nodes
4722 NODE_NAME_CASE(UMUL);
4723 NODE_NAME_CASE(BRANCH_COND);
4725 // AMDGPU DAG nodes
4726 NODE_NAME_CASE(IF)
4727 NODE_NAME_CASE(ELSE)
4728 NODE_NAME_CASE(LOOP)
4729 NODE_NAME_CASE(CALL)
4730 NODE_NAME_CASE(TC_RETURN)
4731 NODE_NAME_CASE(TC_RETURN_GFX)
4732 NODE_NAME_CASE(TRAP)
4733 NODE_NAME_CASE(RET_GLUE)
4734 NODE_NAME_CASE(RETURN_TO_EPILOG)
4735 NODE_NAME_CASE(ENDPGM)
4736 NODE_NAME_CASE(ENDPGM_TRAP)
4737 NODE_NAME_CASE(DWORDADDR)
4738 NODE_NAME_CASE(FRACT)
4739 NODE_NAME_CASE(SETCC)
4740 NODE_NAME_CASE(SETREG)
4741 NODE_NAME_CASE(DENORM_MODE)
4742 NODE_NAME_CASE(FMA_W_CHAIN)
4743 NODE_NAME_CASE(FMUL_W_CHAIN)
4744 NODE_NAME_CASE(CLAMP)
4745 NODE_NAME_CASE(COS_HW)
4746 NODE_NAME_CASE(SIN_HW)
4747 NODE_NAME_CASE(FMAX_LEGACY)
4748 NODE_NAME_CASE(FMIN_LEGACY)
4749 NODE_NAME_CASE(FMAX3)
4750 NODE_NAME_CASE(SMAX3)
4751 NODE_NAME_CASE(UMAX3)
4752 NODE_NAME_CASE(FMIN3)
4753 NODE_NAME_CASE(SMIN3)
4754 NODE_NAME_CASE(UMIN3)
4755 NODE_NAME_CASE(FMED3)
4756 NODE_NAME_CASE(SMED3)
4757 NODE_NAME_CASE(UMED3)
4758 NODE_NAME_CASE(FDOT2)
4759 NODE_NAME_CASE(URECIP)
4760 NODE_NAME_CASE(DIV_SCALE)
4761 NODE_NAME_CASE(DIV_FMAS)
4762 NODE_NAME_CASE(DIV_FIXUP)
4763 NODE_NAME_CASE(FMAD_FTZ)
4764 NODE_NAME_CASE(RCP)
4765 NODE_NAME_CASE(RSQ)
4766 NODE_NAME_CASE(RCP_LEGACY)
4767 NODE_NAME_CASE(RCP_IFLAG)
4768 NODE_NAME_CASE(LOG)
4769 NODE_NAME_CASE(EXP)
4770 NODE_NAME_CASE(FMUL_LEGACY)
4771 NODE_NAME_CASE(RSQ_CLAMP)
4772 NODE_NAME_CASE(FP_CLASS)
4773 NODE_NAME_CASE(DOT4)
4774 NODE_NAME_CASE(CARRY)
4775 NODE_NAME_CASE(BORROW)
4776 NODE_NAME_CASE(BFE_U32)
4777 NODE_NAME_CASE(BFE_I32)
4778 NODE_NAME_CASE(BFI)
4779 NODE_NAME_CASE(BFM)
4780 NODE_NAME_CASE(FFBH_U32)
4781 NODE_NAME_CASE(FFBH_I32)
4782 NODE_NAME_CASE(FFBL_B32)
4783 NODE_NAME_CASE(MUL_U24)
4784 NODE_NAME_CASE(MUL_I24)
4785 NODE_NAME_CASE(MULHI_U24)
4786 NODE_NAME_CASE(MULHI_I24)
4787 NODE_NAME_CASE(MAD_U24)
4788 NODE_NAME_CASE(MAD_I24)
4789 NODE_NAME_CASE(MAD_I64_I32)
4790 NODE_NAME_CASE(MAD_U64_U32)
4791 NODE_NAME_CASE(PERM)
4792 NODE_NAME_CASE(TEXTURE_FETCH)
4793 NODE_NAME_CASE(R600_EXPORT)
4794 NODE_NAME_CASE(CONST_ADDRESS)
4795 NODE_NAME_CASE(REGISTER_LOAD)
4796 NODE_NAME_CASE(REGISTER_STORE)
4797 NODE_NAME_CASE(SAMPLE)
4798 NODE_NAME_CASE(SAMPLEB)
4799 NODE_NAME_CASE(SAMPLED)
4800 NODE_NAME_CASE(SAMPLEL)
4801 NODE_NAME_CASE(CVT_F32_UBYTE0)
4802 NODE_NAME_CASE(CVT_F32_UBYTE1)
4803 NODE_NAME_CASE(CVT_F32_UBYTE2)
4804 NODE_NAME_CASE(CVT_F32_UBYTE3)
4805 NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
4806 NODE_NAME_CASE(CVT_PKNORM_I16_F32)
4807 NODE_NAME_CASE(CVT_PKNORM_U16_F32)
4808 NODE_NAME_CASE(CVT_PK_I16_I32)
4809 NODE_NAME_CASE(CVT_PK_U16_U32)
4810 NODE_NAME_CASE(FP_TO_FP16)
4811 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
4812 NODE_NAME_CASE(CONST_DATA_PTR)
4813 NODE_NAME_CASE(PC_ADD_REL_OFFSET)
4814 NODE_NAME_CASE(LDS)
4815 NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
4816 NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
4817 NODE_NAME_CASE(DUMMY_CHAIN)
4818 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
4819 NODE_NAME_CASE(LOAD_D16_HI)
4820 NODE_NAME_CASE(LOAD_D16_LO)
4821 NODE_NAME_CASE(LOAD_D16_HI_I8)
4822 NODE_NAME_CASE(LOAD_D16_HI_U8)
4823 NODE_NAME_CASE(LOAD_D16_LO_I8)
4824 NODE_NAME_CASE(LOAD_D16_LO_U8)
4825 NODE_NAME_CASE(STORE_MSKOR)
4826 NODE_NAME_CASE(LOAD_CONSTANT)
4827 NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
4828 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
4829 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
4830 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
4831 NODE_NAME_CASE(DS_ORDERED_COUNT)
4832 NODE_NAME_CASE(ATOMIC_CMP_SWAP)
4833 NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
4834 NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
4835 NODE_NAME_CASE(BUFFER_LOAD)
4836 NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
4837 NODE_NAME_CASE(BUFFER_LOAD_USHORT)
4838 NODE_NAME_CASE(BUFFER_LOAD_BYTE)
4839 NODE_NAME_CASE(BUFFER_LOAD_SHORT)
4840 NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
4841 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_TFE)
4842 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
4843 NODE_NAME_CASE(SBUFFER_LOAD)
4844 NODE_NAME_CASE(BUFFER_STORE)
4845 NODE_NAME_CASE(BUFFER_STORE_BYTE)
4846 NODE_NAME_CASE(BUFFER_STORE_SHORT)
4847 NODE_NAME_CASE(BUFFER_STORE_FORMAT)
4848 NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
4849 NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
4850 NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
4851 NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
4852 NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
4853 NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
4854 NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
4855 NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
4856 NODE_NAME_CASE(BUFFER_ATOMIC_AND)
4857 NODE_NAME_CASE(BUFFER_ATOMIC_OR)
4858 NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
4859 NODE_NAME_CASE(BUFFER_ATOMIC_INC)
4860 NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
4861 NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
4862 NODE_NAME_CASE(BUFFER_ATOMIC_CSUB)
4863 NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
4864 NODE_NAME_CASE(BUFFER_ATOMIC_FMIN)
4865 NODE_NAME_CASE(BUFFER_ATOMIC_FMAX)
4867 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
4868 }
4869 return nullptr;
4870 }
4872 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
4873 SelectionDAG &DAG, int Enabled,
4874 int &RefinementSteps,
4875 bool &UseOneConstNR,
4876 bool Reciprocal) const {
4877 EVT VT = Operand.getValueType();
4879 if (VT == MVT::f32) {
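// Use the f32 rsq instruction directly; no Newton-Raphson refinement steps are
// requested.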
4880 RefinementSteps = 0;
4881 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
4882 }
4884 // TODO: There is also an f64 rsq instruction, but the documentation is less
4885 // clear on its precision.
4887 return SDValue();
4888 }
4890 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
4891 SelectionDAG &DAG, int Enabled,
4892 int &RefinementSteps) const {
4893 EVT VT = Operand.getValueType();
4895 if (VT == MVT::f32) {
4896 // Reciprocal, < 1 ulp error.
4897 //
4898 // This reciprocal approximation converges to < 0.5 ulp error with one
4899 // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).
4901 RefinementSteps = 0;
4902 return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
4903 }
4905 // TODO: There is also an f64 rcp instruction, but the documentation is less
4906 // clear on its precision.
4908 return SDValue();
4909 }
4911 static unsigned workitemIntrinsicDim(unsigned ID) {
4912 switch (ID) {
4913 case Intrinsic::amdgcn_workitem_id_x:
4914 return 0;
4915 case Intrinsic::amdgcn_workitem_id_y:
4916 return 1;
4917 case Intrinsic::amdgcn_workitem_id_z:
4918 return 2;
4919 default:
4920 llvm_unreachable("not a workitem intrinsic");
4921 }
4922 }
4924 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
4925 const SDValue Op, KnownBits &Known,
4926 const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
4928 Known.resetAll(); // Don't know anything.
4930 unsigned Opc = Op.getOpcode();
4932 switch (Opc) {
4933 default:
4934 break;
4935 case AMDGPUISD::CARRY:
4936 case AMDGPUISD::BORROW: {
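// Carry and borrow results are either 0 or 1, so all bits above bit 0 are known zero.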
4937 Known.Zero = APInt::getHighBitsSet(32, 31);
4938 break;
4939 }
4941 case AMDGPUISD::BFE_I32:
4942 case AMDGPUISD::BFE_U32: {
4943 ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
4944 if (!CWidth)
4945 return;
4947 uint32_t Width = CWidth->getZExtValue() & 0x1f;
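// For the unsigned extract, every bit above the Width-bit field is known zero.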
4949 if (Opc == AMDGPUISD::BFE_U32)
4950 Known.Zero = APInt::getHighBitsSet(32, 32 - Width);
4952 break;
4953 }
4954 case AMDGPUISD::FP_TO_FP16: {
4955 unsigned BitWidth = Known.getBitWidth();
4957 // High bits are zero.
4958 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
4959 break;
4960 }
4961 case AMDGPUISD::MUL_U24:
4962 case AMDGPUISD::MUL_I24: {
4963 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
4964 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
4965 unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
4966 RHSKnown.countMinTrailingZeros();
4967 Known.Zero.setLowBits(std::min(TrailZ, 32u));
4968 // Skip the extra checks below if all bits are already known to be zero.
4969 if (TrailZ >= 32)
4970 break;
4972 // Truncate to 24 bits.
4973 LHSKnown = LHSKnown.trunc(24);
4974 RHSKnown = RHSKnown.trunc(24);
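// If the significant bits of the product fit in 32 bits, the remaining high bits are
// sign copies (signed multiply) or known zero (unsigned multiply).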
4976 if (Opc == AMDGPUISD::MUL_I24) {
4977 unsigned LHSValBits = LHSKnown.countMaxSignificantBits();
4978 unsigned RHSValBits = RHSKnown.countMaxSignificantBits();
4979 unsigned MaxValBits = LHSValBits + RHSValBits;
4980 if (MaxValBits > 32)
4981 break;
4982 unsigned SignBits = 32 - MaxValBits + 1;
4983 bool LHSNegative = LHSKnown.isNegative();
4984 bool LHSNonNegative = LHSKnown.isNonNegative();
4985 bool LHSPositive = LHSKnown.isStrictlyPositive();
4986 bool RHSNegative = RHSKnown.isNegative();
4987 bool RHSNonNegative = RHSKnown.isNonNegative();
4988 bool RHSPositive = RHSKnown.isStrictlyPositive();
4990 if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
4991 Known.Zero.setHighBits(SignBits);
4992 else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
4993 Known.One.setHighBits(SignBits);
4994 } else {
4995 unsigned LHSValBits = LHSKnown.countMaxActiveBits();
4996 unsigned RHSValBits = RHSKnown.countMaxActiveBits();
4997 unsigned MaxValBits = LHSValBits + RHSValBits;
4998 if (MaxValBits >= 32)
4999 break;
5000 Known.Zero.setBitsFrom(MaxValBits);
5001 }
5002 break;
5003 }
5004 case AMDGPUISD::PERM: {
5005 ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5006 if (!CMask)
5007 return;
5009 KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5010 KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5011 unsigned Sel = CMask->getZExtValue();
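// Each selector byte picks one result byte: values 0-3 take a byte from the second
// operand, 4-6 take a byte from the first operand, 0x0c gives a known-zero byte, and
// values above 0x0c give a known-ones byte; other selectors are left unknown here.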
5013 for (unsigned I = 0; I < 32; I += 8) {
5014 unsigned SelBits = Sel & 0xff;
5015 if (SelBits < 4) {
5016 SelBits *= 8;
5017 Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
5018 Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
5019 } else if (SelBits < 7) {
5020 SelBits = (SelBits & 3) * 8;
5021 Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
5022 Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
5023 } else if (SelBits == 0x0c) {
5024 Known.Zero |= 0xFFull << I;
5025 } else if (SelBits > 0x0c) {
5026 Known.One |= 0xFFull << I;
5027 }
5028 Sel >>= 8;
5029 }
5030 break;
5031 }
5032 case AMDGPUISD::BUFFER_LOAD_UBYTE: {
5033 Known.Zero.setHighBits(24);
5034 break;
5035 }
5036 case AMDGPUISD::BUFFER_LOAD_USHORT: {
5037 Known.Zero.setHighBits(16);
5038 break;
5039 }
5040 case AMDGPUISD::LDS: {
5041 auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
5042 Align Alignment = GA->getGlobal()->getPointerAlignment(DAG.getDataLayout());
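// An LDS address fits in 16 bits and is at least as aligned as the global it names.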
5044 Known.Zero.setHighBits(16);
5045 Known.Zero.setLowBits(Log2(Alignment));
5046 break;
5047 }
5048 case AMDGPUISD::SMIN3:
5049 case AMDGPUISD::SMAX3:
5050 case AMDGPUISD::SMED3:
5051 case AMDGPUISD::UMIN3:
5052 case AMDGPUISD::UMAX3:
5053 case AMDGPUISD::UMED3: {
5054 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(2), Depth + 1);
5055 if (Known2.isUnknown())
5056 break;
5058 KnownBits Known1 = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
5059 if (Known1.isUnknown())
5060 break;
5062 KnownBits Known0 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
5063 if (Known0.isUnknown())
5064 break;
5066 // TODO: Handle LeadZero/LeadOne from UMIN/UMAX handling.
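// The result's known bits are those known to the same value in all three operands.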
5067 Known.Zero = Known0.Zero & Known1.Zero & Known2.Zero;
5068 Known.One = Known0.One & Known1.One & Known2.One;
5069 break;
5070 }
5071 case ISD::INTRINSIC_WO_CHAIN: {
5072 unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5073 switch (IID) {
5074 case Intrinsic::amdgcn_workitem_id_x:
5075 case Intrinsic::amdgcn_workitem_id_y:
5076 case Intrinsic::amdgcn_workitem_id_z: {
5077 unsigned MaxValue = Subtarget->getMaxWorkitemID(
5078 DAG.getMachineFunction().getFunction(), workitemIntrinsicDim(IID));
5079 Known.Zero.setHighBits(llvm::countl_zero(MaxValue));
5080 break;
5081 }
5082 default:
5083 break;
5084 }
5085 }
5086 }
5087 }
5089 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
5090 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
5091 unsigned Depth) const {
5092 switch (Op.getOpcode()) {
5093 case AMDGPUISD::BFE_I32: {
5094 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5095 if (!Width)
5096 return 1;
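// A Width-bit signed field extracted into 32 bits has at least 32 - Width + 1 copies
// of the sign bit.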
5098 unsigned SignBits = 32 - Width->getZExtValue() + 1;
5099 if (!isNullConstant(Op.getOperand(1)))
5100 return SignBits;
5102 // TODO: Could probably figure something out with non-0 offsets.
5103 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5104 return std::max(SignBits, Op0SignBits);
5105 }
5107 case AMDGPUISD::BFE_U32: {
5108 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
5109 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
5110 }
5112 case AMDGPUISD::CARRY:
5113 case AMDGPUISD::BORROW:
5114 return 31;
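// Sign-extending byte/short buffer loads leave 25/17 sign bits; the zero-extending
// forms leave 24/16 known-zero high bits, which count as sign bits as well.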
5115 case AMDGPUISD::BUFFER_LOAD_BYTE:
5116 return 25;
5117 case AMDGPUISD::BUFFER_LOAD_SHORT:
5118 return 17;
5119 case AMDGPUISD::BUFFER_LOAD_UBYTE:
5120 return 24;
5121 case AMDGPUISD::BUFFER_LOAD_USHORT:
5122 return 16;
5123 case AMDGPUISD::FP_TO_FP16:
5124 return 16;
5125 case AMDGPUISD::SMIN3:
5126 case AMDGPUISD::SMAX3:
5127 case AMDGPUISD::SMED3:
5128 case AMDGPUISD::UMIN3:
5129 case AMDGPUISD::UMAX3:
5130 case AMDGPUISD::UMED3: {
5131 unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(2), Depth + 1);
5132 if (Tmp2 == 1)
5133 return 1; // Early out.
5135 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth + 1);
5136 if (Tmp1 == 1)
5137 return 1; // Early out.
5139 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
5140 if (Tmp0 == 1)
5141 return 1; // Early out.
5143 return std::min(Tmp0, std::min(Tmp1, Tmp2));
5144 }
5145 default:
5146 return 1;
5147 }
5148 }
5150 unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
5151 GISelKnownBits &Analysis, Register R,
5152 const APInt &DemandedElts, const MachineRegisterInfo &MRI,
5153 unsigned Depth) const {
5154 const MachineInstr *MI = MRI.getVRegDef(R);
5155 if (!MI)
5156 return 1;
5158 // TODO: Check range metadata on MMO.
5159 switch (MI->getOpcode()) {
5160 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
5161 return 25;
5162 case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
5163 return 17;
5164 case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
5165 return 24;
5166 case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
5167 return 16;
5168 case AMDGPU::G_AMDGPU_SMED3:
5169 case AMDGPU::G_AMDGPU_UMED3: {
5170 auto [Dst, Src0, Src1, Src2] = MI->getFirst4Regs();
5171 unsigned Tmp2 = Analysis.computeNumSignBits(Src2, DemandedElts, Depth + 1);
5172 if (Tmp2 == 1)
5173 return 1;
5174 unsigned Tmp1 = Analysis.computeNumSignBits(Src1, DemandedElts, Depth + 1);
5175 if (Tmp1 == 1)
5176 return 1;
5177 unsigned Tmp0 = Analysis.computeNumSignBits(Src0, DemandedElts, Depth + 1);
5178 if (Tmp0 == 1)
5179 return 1;
5180 return std::min(Tmp0, std::min(Tmp1, Tmp2));
5181 }
5182 default:
5183 return 1;
5184 }
5185 }
5187 bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
5188 const SelectionDAG &DAG,
5189 bool SNaN,
5190 unsigned Depth) const {
5191 unsigned Opcode = Op.getOpcode();
5192 switch (Opcode) {
5193 case AMDGPUISD::FMIN_LEGACY:
5194 case AMDGPUISD::FMAX_LEGACY: {
5195 if (SNaN)
5196 return true;
5198 // TODO: Can check no-NaNs on one of the operands for each one, but which
5199 // one?
5200 return false;
5201 }
5202 case AMDGPUISD::FMUL_LEGACY:
5203 case AMDGPUISD::CVT_PKRTZ_F16_F32: {
5204 if (SNaN)
5205 return true;
5206 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
5207 DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
5208 }
5209 case AMDGPUISD::FMED3:
5210 case AMDGPUISD::FMIN3:
5211 case AMDGPUISD::FMAX3:
5212 case AMDGPUISD::FMAD_FTZ: {
5213 if (SNaN)
5214 return true;
5215 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
5216 DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5217 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
5218 }
5219 case AMDGPUISD::CVT_F32_UBYTE0:
5220 case AMDGPUISD::CVT_F32_UBYTE1:
5221 case AMDGPUISD::CVT_F32_UBYTE2:
5222 case AMDGPUISD::CVT_F32_UBYTE3:
5223 return true;
5225 case AMDGPUISD::RCP:
5226 case AMDGPUISD::RSQ:
5227 case AMDGPUISD::RCP_LEGACY:
5228 case AMDGPUISD::RSQ_CLAMP: {
5229 if (SNaN)
5230 return true;
5232 // TODO: Need an is-known-positive check.
5233 return false;
5234 }
5235 case ISD::FLDEXP:
5236 case AMDGPUISD::FRACT: {
5237 if (SNaN)
5238 return true;
5239 return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
5240 }
5241 case AMDGPUISD::DIV_SCALE:
5242 case AMDGPUISD::DIV_FMAS:
5243 case AMDGPUISD::DIV_FIXUP:
5244 // TODO: Refine on operands.
5245 return SNaN;
5246 case AMDGPUISD::SIN_HW:
5247 case AMDGPUISD::COS_HW: {
5248 // TODO: Need a check for infinity.
5249 return SNaN;
5250 }
5251 case ISD::INTRINSIC_WO_CHAIN: {
5252 unsigned IntrinsicID
5253 = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5254 // TODO: Handle more intrinsics
5255 switch (IntrinsicID) {
5256 case Intrinsic::amdgcn_cubeid:
5257 return true;
5259 case Intrinsic::amdgcn_frexp_mant: {
5260 if (SNaN)
5261 return true;
5262 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
5263 }
5264 case Intrinsic::amdgcn_cvt_pkrtz: {
5265 if (SNaN)
5266 return true;
5267 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5268 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
5269 }
5270 case Intrinsic::amdgcn_rcp:
5271 case Intrinsic::amdgcn_rsq:
5272 case Intrinsic::amdgcn_rcp_legacy:
5273 case Intrinsic::amdgcn_rsq_legacy:
5274 case Intrinsic::amdgcn_rsq_clamp: {
5275 if (SNaN)
5276 return true;
5278 // TODO: Need an is-known-positive check.
5279 return false;
5280 }
5281 case Intrinsic::amdgcn_trig_preop:
5282 case Intrinsic::amdgcn_fdot2:
5283 // TODO: Refine on operand
5284 return SNaN;
5285 case Intrinsic::amdgcn_fma_legacy:
5286 if (SNaN)
5287 return true;
5288 return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
5289 DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1) &&
5290 DAG.isKnownNeverNaN(Op.getOperand(3), SNaN, Depth + 1);
5291 default:
5292 return false;
5293 }
5294 }
5295 default:
5296 return false;
5297 }
5298 }
5300 bool AMDGPUTargetLowering::isReassocProfitable(MachineRegisterInfo &MRI,
5301 Register N0, Register N1) const {
5302 return true; // FIXME: handle regbanks
5303 }
5305 TargetLowering::AtomicExpansionKind
5306 AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
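// Operations with no native atomic instruction are expanded to a compare-and-swap
// loop; ordinary 32- and 64-bit integer RMW operations are not expanded.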
5307 switch (RMW->getOperation()) {
5308 case AtomicRMWInst::Nand:
5309 case AtomicRMWInst::FAdd:
5310 case AtomicRMWInst::FSub:
5311 case AtomicRMWInst::FMax:
5312 case AtomicRMWInst::FMin:
5313 return AtomicExpansionKind::CmpXChg;
5314 default: {
5315 if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
5316 unsigned Size = IntTy->getBitWidth();
5317 if (Size == 32 || Size == 64)
5318 return AtomicExpansionKind::None;
5319 }
5321 return AtomicExpansionKind::CmpXChg;
5322 }
5323 }
5324 }
5326 bool AMDGPUTargetLowering::isConstantUnsignedBitfieldExtractLegal(
5327 unsigned Opc, LLT Ty1, LLT Ty2) const {
5328 return (Ty1 == LLT::scalar(32) || Ty1 == LLT::scalar(64)) &&
5329 Ty2 == LLT::scalar(32);
5330 }
5332 /// Whether it is profitable to sink the operands of an
5333 /// Instruction I to the basic block of I.
5334 /// This helps to use modifiers such as abs and neg more often.
5335 bool AMDGPUTargetLowering::shouldSinkOperands(
5336 Instruction *I, SmallVectorImpl<Use *> &Ops) const {
5337 using namespace PatternMatch;
5339 for (auto &Op : I->operands()) {
5340 // Ensure we are not already sinking this operand.
5341 if (any_of(Ops, [&](Use *U) { return U->get() == Op.get(); }))
5342 continue;
5344 if (match(&Op, m_FAbs(m_Value())) || match(&Op, m_FNeg(m_Value())))
5345 Ops.push_back(&Op);
5346 }
5348 return !Ops.empty();
5349 }