Handle i8 returns
[llvm/msp430.git] / lib / Target / IA64 / IA64ISelLowering.cpp
blob34a0686564c07c8f1314ff9d8cdabb89aa745867
//===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64ISelLowering class.
//
//===----------------------------------------------------------------------===//
#include "IA64ISelLowering.h"
#include "IA64MachineFunctionInfo.h"
#include "IA64TargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
using namespace llvm;
26 IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
27 : TargetLowering(TM) {
29 // register class for general registers
30 addRegisterClass(MVT::i64, IA64::GRRegisterClass);
32 // register class for FP registers
33 addRegisterClass(MVT::f64, IA64::FPRegisterClass);
35 // register class for predicate registers
36 addRegisterClass(MVT::i1, IA64::PRRegisterClass);
38 setLoadExtAction(ISD::EXTLOAD , MVT::i1 , Promote);
40 setLoadExtAction(ISD::ZEXTLOAD , MVT::i1 , Promote);
42 setLoadExtAction(ISD::SEXTLOAD , MVT::i1 , Promote);
43 setLoadExtAction(ISD::SEXTLOAD , MVT::i8 , Expand);
44 setLoadExtAction(ISD::SEXTLOAD , MVT::i16 , Expand);
45 setLoadExtAction(ISD::SEXTLOAD , MVT::i32 , Expand);
47 setOperationAction(ISD::BRIND , MVT::Other, Expand);
48 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
49 setOperationAction(ISD::BR_CC , MVT::Other, Expand);
50 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
52 // ia64 uses SELECT not SELECT_CC
53 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
55 // We need to handle ISD::RET for void functions ourselves,
56 // so we get a chance to restore ar.pfs before adding a
57 // br.ret insn
58 setOperationAction(ISD::RET, MVT::Other, Custom);
60 setShiftAmountType(MVT::i64);
62 setOperationAction(ISD::FREM , MVT::f32 , Expand);
63 setOperationAction(ISD::FREM , MVT::f64 , Expand);
65 setOperationAction(ISD::UREM , MVT::f32 , Expand);
66 setOperationAction(ISD::UREM , MVT::f64 , Expand);
68 setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
70 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
71 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
73 // We don't support sin/cos/sqrt/pow
74 setOperationAction(ISD::FSIN , MVT::f64, Expand);
75 setOperationAction(ISD::FCOS , MVT::f64, Expand);
76 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
77 setOperationAction(ISD::FPOW , MVT::f64, Expand);
78 setOperationAction(ISD::FSIN , MVT::f32, Expand);
79 setOperationAction(ISD::FCOS , MVT::f32, Expand);
80 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
81 setOperationAction(ISD::FPOW , MVT::f32, Expand);
83 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
85 // FIXME: IA64 supports fcopysign natively!
86 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
87 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
89 // We don't have line number support yet.
90 setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
91 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
92 setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
93 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
95 // IA64 has ctlz in the form of the 'fnorm' instruction. The Legalizer
96 // expansion for ctlz/cttz in terms of ctpop is much larger, but lower
97 // latency.
98 // FIXME: Custom lower CTLZ when compiling for size?
99 setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
100 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
101 setOperationAction(ISD::ROTL , MVT::i64 , Expand);
102 setOperationAction(ISD::ROTR , MVT::i64 , Expand);
104 // FIXME: IA64 has this, but is not implemented. should be mux @rev
105 setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
107 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
108 setOperationAction(ISD::VAARG , MVT::Other, Custom);
109 setOperationAction(ISD::VASTART , MVT::Other, Custom);
111 // Use the default implementation.
112 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
113 setOperationAction(ISD::VAEND , MVT::Other, Expand);
114 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
115 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
116 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
118 // Thread Local Storage
119 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
121 setStackPointerRegisterToSaveRestore(IA64::r12);
123 setJumpBufSize(704); // on ia64-linux, jmp_bufs are 704 bytes..
124 setJumpBufAlignment(16); // ...and must be 16-byte aligned
126 computeRegisterProperties();
128 addLegalFPImmediate(APFloat(+0.0));
129 addLegalFPImmediate(APFloat(-0.0));
130 addLegalFPImmediate(APFloat(+1.0));
131 addLegalFPImmediate(APFloat(-1.0));
134 const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
135 switch (Opcode) {
136 default: return 0;
137 case IA64ISD::GETFD: return "IA64ISD::GETFD";
138 case IA64ISD::BRCALL: return "IA64ISD::BRCALL";
139 case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
143 MVT IA64TargetLowering::getSetCCResultType(MVT VT) const {
144 return MVT::i1;
147 void IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG,
148 SmallVectorImpl<SDValue> &ArgValues,
149 DebugLoc dl) {
151 // add beautiful description of IA64 stack frame format
152 // here (from intel 24535803.pdf most likely)
154 MachineFunction &MF = DAG.getMachineFunction();
155 MachineFrameInfo *MFI = MF.getFrameInfo();
156 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
158 GP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
159 SP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
160 RP = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
162 MachineBasicBlock& BB = MF.front();
164 unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
165 IA64::r36, IA64::r37, IA64::r38, IA64::r39};
167 unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
168 IA64::F12,IA64::F13,IA64::F14, IA64::F15};
170 unsigned argVreg[8];
171 unsigned argPreg[8];
172 unsigned argOpc[8];
174 unsigned used_FPArgs = 0; // how many FP args have been used so far?
176 unsigned ArgOffset = 0;
177 int count = 0;
179 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
181 SDValue newroot, argt;
182 if(count < 8) { // need to fix this logic? maybe.
184 switch (getValueType(I->getType()).getSimpleVT()) {
185 default:
186 assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
187 case MVT::f32:
188 // fixme? (well, will need to for weird FP structy stuff,
189 // see intel ABI docs)
190 case MVT::f64:
191 //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
192 MF.getRegInfo().addLiveIn(args_FP[used_FPArgs]);
193 // mark this reg as liveIn
194 // floating point args go into f8..f15 as-needed, the increment
195 argVreg[count] = // is below..:
196 MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::f64));
197 // FP args go into f8..f15 as needed: (hence the ++)
198 argPreg[count] = args_FP[used_FPArgs++];
199 argOpc[count] = IA64::FMOV;
200 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), dl,
201 argVreg[count], MVT::f64);
202 if (I->getType() == Type::FloatTy)
203 argt = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, argt,
204 DAG.getIntPtrConstant(0));
205 break;
206 case MVT::i1: // NOTE: as far as C abi stuff goes,
207 // bools are just boring old ints
208 case MVT::i8:
209 case MVT::i16:
210 case MVT::i32:
211 case MVT::i64:
212 //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
213 MF.getRegInfo().addLiveIn(args_int[count]);
214 // mark this register as liveIn
215 argVreg[count] =
216 MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
217 argPreg[count] = args_int[count];
218 argOpc[count] = IA64::MOV;
219 argt = newroot =
220 DAG.getCopyFromReg(DAG.getRoot(), dl, argVreg[count], MVT::i64);
221 if ( getValueType(I->getType()) != MVT::i64)
222 argt = DAG.getNode(ISD::TRUNCATE, dl, getValueType(I->getType()),
223 newroot);
224 break;
226 } else { // more than 8 args go into the frame
227 // Create the frame index object for this incoming parameter...
228 ArgOffset = 16 + 8 * (count - 8);
229 int FI = MFI->CreateFixedObject(8, ArgOffset);
231 // Create the SelectionDAG nodes corresponding to a load
232 //from this parameter
233 SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
234 argt = newroot = DAG.getLoad(getValueType(I->getType()), dl,
235 DAG.getEntryNode(), FIN, NULL, 0);
237 ++count;
238 DAG.setRoot(newroot.getValue(1));
239 ArgValues.push_back(argt);
243 // Create a vreg to hold the output of (what will become)
244 // the "alloc" instruction
245 VirtGPR = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i64));
246 BuildMI(&BB, dl, TII->get(IA64::PSEUDO_ALLOC), VirtGPR);
247 // we create a PSEUDO_ALLOC (pseudo)instruction for now
249 BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
251 // hmm:
252 BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
253 BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
254 // ..hmm.
256 BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
258 // hmm:
259 BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
260 BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
261 // ..hmm.
264 unsigned tempOffset=0;
266 // if this is a varargs function, we simply lower llvm.va_start by
267 // pointing to the first entry
268 if(F.isVarArg()) {
269 tempOffset=0;
270 VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
273 // here we actually do the moving of args, and store them to the stack
274 // too if this is a varargs function:
275 for (int i = 0; i < count && i < 8; ++i) {
276 BuildMI(&BB, dl, TII->get(argOpc[i]), argVreg[i]).addReg(argPreg[i]);
277 if(F.isVarArg()) {
278 // if this is a varargs function, we copy the input registers to the stack
279 int FI = MFI->CreateFixedObject(8, tempOffset);
280 tempOffset+=8; //XXX: is it safe to use r22 like this?
281 BuildMI(&BB, dl, TII->get(IA64::MOV), IA64::r22).addFrameIndex(FI);
282 // FIXME: we should use st8.spill here, one day
283 BuildMI(&BB, dl, TII->get(IA64::ST8), IA64::r22).addReg(argPreg[i]);
287 // Finally, inform the code generator which regs we return values in.
288 // (see the ISD::RET: case in the instruction selector)
289 switch (getValueType(F.getReturnType()).getSimpleVT()) {
290 default: assert(0 && "i have no idea where to return this type!");
291 case MVT::isVoid: break;
292 case MVT::i1:
293 case MVT::i8:
294 case MVT::i16:
295 case MVT::i32:
296 case MVT::i64:
297 MF.getRegInfo().addLiveOut(IA64::r8);
298 break;
299 case MVT::f32:
300 case MVT::f64:
301 MF.getRegInfo().addLiveOut(IA64::F8);
302 break;
306 std::pair<SDValue, SDValue>
307 IA64TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy,
308 bool RetSExt, bool RetZExt, bool isVarArg,
309 bool isInreg, unsigned CallingConv,
310 bool isTailCall, SDValue Callee,
311 ArgListTy &Args, SelectionDAG &DAG,
312 DebugLoc dl) {
314 MachineFunction &MF = DAG.getMachineFunction();
316 unsigned NumBytes = 16;
317 unsigned outRegsUsed = 0;
319 if (Args.size() > 8) {
320 NumBytes += (Args.size() - 8) * 8;
321 outRegsUsed = 8;
322 } else {
323 outRegsUsed = Args.size();
326 // FIXME? this WILL fail if we ever try to pass around an arg that
327 // consumes more than a single output slot (a 'real' double, int128
328 // some sort of aggregate etc.), as we'll underestimate how many 'outX'
329 // registers we use. Hopefully, the assembler will notice.
330 MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
331 std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
333 // keep stack frame 16-byte aligned
334 // assert(NumBytes==((NumBytes+15) & ~15) &&
335 // "stack frame not 16-byte aligned!");
336 NumBytes = (NumBytes+15) & ~15;
338 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
340 SDValue StackPtr;
341 std::vector<SDValue> Stores;
342 std::vector<SDValue> Converts;
343 std::vector<SDValue> RegValuesToPass;
344 unsigned ArgOffset = 16;
346 for (unsigned i = 0, e = Args.size(); i != e; ++i)
348 SDValue Val = Args[i].Node;
349 MVT ObjectVT = Val.getValueType();
350 SDValue ValToStore(0, 0), ValToConvert(0, 0);
351 unsigned ObjSize=8;
352 switch (ObjectVT.getSimpleVT()) {
353 default: assert(0 && "unexpected argument type!");
354 case MVT::i1:
355 case MVT::i8:
356 case MVT::i16:
357 case MVT::i32: {
358 //promote to 64-bits, sign/zero extending based on type
359 //of the argument
360 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
361 if (Args[i].isSExt)
362 ExtendKind = ISD::SIGN_EXTEND;
363 else if (Args[i].isZExt)
364 ExtendKind = ISD::ZERO_EXTEND;
365 Val = DAG.getNode(ExtendKind, dl, MVT::i64, Val);
366 // XXX: fall through
368 case MVT::i64:
369 //ObjSize = 8;
370 if(RegValuesToPass.size() >= 8) {
371 ValToStore = Val;
372 } else {
373 RegValuesToPass.push_back(Val);
375 break;
376 case MVT::f32:
377 //promote to 64-bits
378 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
379 // XXX: fall through
380 case MVT::f64:
381 if(RegValuesToPass.size() >= 8) {
382 ValToStore = Val;
383 } else {
384 RegValuesToPass.push_back(Val);
385 if(1 /* TODO: if(calling external or varadic function)*/ ) {
386 ValToConvert = Val; // additionally pass this FP value as an int
389 break;
392 if(ValToStore.getNode()) {
393 if(!StackPtr.getNode()) {
394 StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
396 SDValue PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
397 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr, PtrOff);
398 Stores.push_back(DAG.getStore(Chain, dl, ValToStore, PtrOff, NULL, 0));
399 ArgOffset += ObjSize;
402 if(ValToConvert.getNode()) {
403 Converts.push_back(DAG.getNode(IA64ISD::GETFD, dl,
404 MVT::i64, ValToConvert));
408 // Emit all stores, make sure they occur before any copies into physregs.
409 if (!Stores.empty())
410 Chain = DAG.getNode(ISD::TokenFactor, dl,
411 MVT::Other, &Stores[0],Stores.size());
413 static const unsigned IntArgRegs[] = {
414 IA64::out0, IA64::out1, IA64::out2, IA64::out3,
415 IA64::out4, IA64::out5, IA64::out6, IA64::out7
418 static const unsigned FPArgRegs[] = {
419 IA64::F8, IA64::F9, IA64::F10, IA64::F11,
420 IA64::F12, IA64::F13, IA64::F14, IA64::F15
423 SDValue InFlag;
425 // save the current GP, SP and RP : FIXME: do we need to do all 3 always?
426 SDValue GPBeforeCall = DAG.getCopyFromReg(Chain, dl, IA64::r1,
427 MVT::i64, InFlag);
428 Chain = GPBeforeCall.getValue(1);
429 InFlag = Chain.getValue(2);
430 SDValue SPBeforeCall = DAG.getCopyFromReg(Chain, dl, IA64::r12,
431 MVT::i64, InFlag);
432 Chain = SPBeforeCall.getValue(1);
433 InFlag = Chain.getValue(2);
434 SDValue RPBeforeCall = DAG.getCopyFromReg(Chain, dl, IA64::rp,
435 MVT::i64, InFlag);
436 Chain = RPBeforeCall.getValue(1);
437 InFlag = Chain.getValue(2);
439 // Build a sequence of copy-to-reg nodes chained together with token chain
440 // and flag operands which copy the outgoing integer args into regs out[0-7]
441 // mapped 1:1 and the FP args into regs F8-F15 "lazily"
442 // TODO: for performance, we should only copy FP args into int regs when we
443 // know this is required (i.e. for varardic or external (unknown) functions)
445 // first to the FP->(integer representation) conversions, these are
446 // flagged for now, but shouldn't have to be (TODO)
447 unsigned seenConverts = 0;
448 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
449 if(RegValuesToPass[i].getValueType().isFloatingPoint()) {
450 Chain = DAG.getCopyToReg(Chain, dl, IntArgRegs[i],
451 Converts[seenConverts++], InFlag);
452 InFlag = Chain.getValue(1);
456 // next copy args into the usual places, these are flagged
457 unsigned usedFPArgs = 0;
458 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
459 Chain = DAG.getCopyToReg(Chain, dl,
460 RegValuesToPass[i].getValueType().isInteger() ?
461 IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag);
462 InFlag = Chain.getValue(1);
465 // If the callee is a GlobalAddress node (quite common, every direct call is)
466 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
468 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
469 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
473 std::vector<MVT> NodeTys;
474 std::vector<SDValue> CallOperands;
475 NodeTys.push_back(MVT::Other); // Returns a chain
476 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
477 CallOperands.push_back(Chain);
478 CallOperands.push_back(Callee);
480 // emit the call itself
481 if (InFlag.getNode())
482 CallOperands.push_back(InFlag);
483 else
484 assert(0 && "this should never happen!\n");
486 // to make way for a hack:
487 Chain = DAG.getNode(IA64ISD::BRCALL, dl, NodeTys,
488 &CallOperands[0], CallOperands.size());
489 InFlag = Chain.getValue(1);
491 // restore the GP, SP and RP after the call
492 Chain = DAG.getCopyToReg(Chain, dl, IA64::r1, GPBeforeCall, InFlag);
493 InFlag = Chain.getValue(1);
494 Chain = DAG.getCopyToReg(Chain, dl, IA64::r12, SPBeforeCall, InFlag);
495 InFlag = Chain.getValue(1);
496 Chain = DAG.getCopyToReg(Chain, dl, IA64::rp, RPBeforeCall, InFlag);
497 InFlag = Chain.getValue(1);
499 std::vector<MVT> RetVals;
500 RetVals.push_back(MVT::Other);
501 RetVals.push_back(MVT::Flag);
503 MVT RetTyVT = getValueType(RetTy);
504 SDValue RetVal;
505 if (RetTyVT != MVT::isVoid) {
506 switch (RetTyVT.getSimpleVT()) {
507 default: assert(0 && "Unknown value type to return!");
508 case MVT::i1: { // bools are just like other integers (returned in r8)
509 // we *could* fall through to the truncate below, but this saves a
510 // few redundant predicate ops
511 SDValue boolInR8 = DAG.getCopyFromReg(Chain, dl, IA64::r8,
512 MVT::i64,InFlag);
513 InFlag = boolInR8.getValue(2);
514 Chain = boolInR8.getValue(1);
515 SDValue zeroReg = DAG.getCopyFromReg(Chain, dl, IA64::r0,
516 MVT::i64, InFlag);
517 InFlag = zeroReg.getValue(2);
518 Chain = zeroReg.getValue(1);
520 RetVal = DAG.getSetCC(dl, MVT::i1, boolInR8, zeroReg, ISD::SETNE);
521 break;
523 case MVT::i8:
524 case MVT::i16:
525 case MVT::i32:
526 RetVal = DAG.getCopyFromReg(Chain, dl, IA64::r8, MVT::i64, InFlag);
527 Chain = RetVal.getValue(1);
529 // keep track of whether it is sign or zero extended (todo: bools?)
530 /* XXX
531 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
532 dl, MVT::i64, RetVal, DAG.getValueType(RetTyVT));
534 RetVal = DAG.getNode(ISD::TRUNCATE, dl, RetTyVT, RetVal);
535 break;
536 case MVT::i64:
537 RetVal = DAG.getCopyFromReg(Chain, dl, IA64::r8, MVT::i64, InFlag);
538 Chain = RetVal.getValue(1);
539 InFlag = RetVal.getValue(2); // XXX dead
540 break;
541 case MVT::f32:
542 RetVal = DAG.getCopyFromReg(Chain, dl, IA64::F8, MVT::f64, InFlag);
543 Chain = RetVal.getValue(1);
544 RetVal = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, RetVal,
545 DAG.getIntPtrConstant(0));
546 break;
547 case MVT::f64:
548 RetVal = DAG.getCopyFromReg(Chain, dl, IA64::F8, MVT::f64, InFlag);
549 Chain = RetVal.getValue(1);
550 InFlag = RetVal.getValue(2); // XXX dead
551 break;
555 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
556 DAG.getIntPtrConstant(0, true), SDValue());
557 return std::make_pair(RetVal, Chain);
560 SDValue IA64TargetLowering::
561 LowerOperation(SDValue Op, SelectionDAG &DAG) {
562 DebugLoc dl = Op.getDebugLoc();
563 switch (Op.getOpcode()) {
564 default: assert(0 && "Should not custom lower this!");
565 case ISD::GlobalTLSAddress:
566 assert(0 && "TLS not implemented for IA64.");
567 case ISD::RET: {
568 SDValue AR_PFSVal, Copy;
570 switch(Op.getNumOperands()) {
571 default:
572 assert(0 && "Do not know how to return this many arguments!");
573 abort();
574 case 1:
575 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), dl, VirtGPR, MVT::i64);
576 AR_PFSVal = DAG.getCopyToReg(AR_PFSVal.getValue(1), dl, IA64::AR_PFS,
577 AR_PFSVal);
578 return DAG.getNode(IA64ISD::RET_FLAG, dl, MVT::Other, AR_PFSVal);
579 case 3: {
580 // Copy the result into the output register & restore ar.pfs
581 MVT ArgVT = Op.getOperand(1).getValueType();
582 unsigned ArgReg = ArgVT.isInteger() ? IA64::r8 : IA64::F8;
584 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), dl, VirtGPR, MVT::i64);
585 Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), dl, ArgReg,
586 Op.getOperand(1), SDValue());
587 AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), dl,
588 IA64::AR_PFS, AR_PFSVal, Copy.getValue(1));
589 return DAG.getNode(IA64ISD::RET_FLAG, dl, MVT::Other,
590 AR_PFSVal, AR_PFSVal.getValue(1));
593 return SDValue();
595 case ISD::VAARG: {
596 MVT VT = getPointerTy();
597 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
598 SDValue VAList = DAG.getLoad(VT, dl, Op.getOperand(0), Op.getOperand(1),
599 SV, 0);
600 // Increment the pointer, VAList, to the next vaarg
601 SDValue VAIncr = DAG.getNode(ISD::ADD, dl, VT, VAList,
602 DAG.getConstant(VT.getSizeInBits()/8,
603 VT));
604 // Store the incremented VAList to the legalized pointer
605 VAIncr = DAG.getStore(VAList.getValue(1), dl, VAIncr,
606 Op.getOperand(1), SV, 0);
607 // Load the actual argument out of the pointer VAList
608 return DAG.getLoad(Op.getValueType(), dl, VAIncr, VAList, NULL, 0);
610 case ISD::VASTART: {
611 // vastart just stores the address of the VarArgsFrameIndex slot into the
612 // memory location argument.
613 SDValue FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
614 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
615 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
617 // Frame & Return address. Currently unimplemented
618 case ISD::RETURNADDR: break;
619 case ISD::FRAMEADDR: break;
621 return SDValue();