Added llvmgcc version to allow tests to be xfailed by frontend version.
[llvm-complete.git] / lib / Target / IA64 / IA64ISelLowering.cpp
blob85e6737d4b3cc0f58961b2a724d4f46389c2e2d9
//===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Duraid Madina and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the IA64ISelLowering class.
//
//===----------------------------------------------------------------------===//
14 #include "IA64ISelLowering.h"
15 #include "IA64MachineFunctionInfo.h"
16 #include "IA64TargetMachine.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
24 using namespace llvm;
26 IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
27 : TargetLowering(TM) {
29 // register class for general registers
30 addRegisterClass(MVT::i64, IA64::GRRegisterClass);
32 // register class for FP registers
33 addRegisterClass(MVT::f64, IA64::FPRegisterClass);
35 // register class for predicate registers
36 addRegisterClass(MVT::i1, IA64::PRRegisterClass);
38 setOperationAction(ISD::BR_CC , MVT::Other, Expand);
39 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
41 // ia64 uses SELECT not SELECT_CC
42 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
44 // We need to handle ISD::RET for void functions ourselves,
45 // so we get a chance to restore ar.pfs before adding a
46 // br.ret insn
47 setOperationAction(ISD::RET, MVT::Other, Custom);
49 setSetCCResultType(MVT::i1);
50 setShiftAmountType(MVT::i64);
52 setOperationAction(ISD::EXTLOAD , MVT::i1 , Promote);
54 setOperationAction(ISD::ZEXTLOAD , MVT::i1 , Expand);
56 setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
57 setOperationAction(ISD::SEXTLOAD , MVT::i8 , Expand);
58 setOperationAction(ISD::SEXTLOAD , MVT::i16 , Expand);
59 setOperationAction(ISD::SEXTLOAD , MVT::i32 , Expand);
61 setOperationAction(ISD::FREM , MVT::f32 , Expand);
62 setOperationAction(ISD::FREM , MVT::f64 , Expand);
64 setOperationAction(ISD::UREM , MVT::f32 , Expand);
65 setOperationAction(ISD::UREM , MVT::f64 , Expand);
67 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
68 setOperationAction(ISD::MEMSET , MVT::Other, Expand);
69 setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
71 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
72 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
74 // We don't support sin/cos/sqrt
75 setOperationAction(ISD::FSIN , MVT::f64, Expand);
76 setOperationAction(ISD::FCOS , MVT::f64, Expand);
77 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
78 setOperationAction(ISD::FSIN , MVT::f32, Expand);
79 setOperationAction(ISD::FCOS , MVT::f32, Expand);
80 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
82 // FIXME: IA64 supports fcopysign natively!
83 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
84 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
86 // We don't have line number support yet.
87 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
88 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
89 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
91 //IA64 has these, but they are not implemented
92 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
93 setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
94 setOperationAction(ISD::ROTL , MVT::i64 , Expand);
95 setOperationAction(ISD::ROTR , MVT::i64 , Expand);
96 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); // mux @rev
98 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
99 setOperationAction(ISD::VAARG , MVT::Other, Custom);
100 setOperationAction(ISD::VASTART , MVT::Other, Custom);
102 // Use the default implementation.
103 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
104 setOperationAction(ISD::VAEND , MVT::Other, Expand);
105 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
106 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
107 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
109 setStackPointerRegisterToSaveRestore(IA64::r12);
111 computeRegisterProperties();
113 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
114 addLegalFPImmediate(+0.0);
115 addLegalFPImmediate(+1.0);
118 const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
119 switch (Opcode) {
120 default: return 0;
121 case IA64ISD::GETFD: return "IA64ISD::GETFD";
122 case IA64ISD::BRCALL: return "IA64ISD::BRCALL";
123 case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
128 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
129 static bool isFloatingPointZero(SDOperand Op) {
130 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
131 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
132 else if (Op.getOpcode() == ISD::EXTLOAD || Op.getOpcode() == ISD::LOAD) {
133 // Maybe this has already been legalized into the constant pool?
134 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
135 if (ConstantFP *CFP = dyn_cast<ConstantFP>(CP->get()))
136 return CFP->isExactlyValue(-0.0) || CFP->isExactlyValue(0.0);
138 return false;
141 std::vector<SDOperand>
142 IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
143 std::vector<SDOperand> ArgValues;
145 // add beautiful description of IA64 stack frame format
146 // here (from intel 24535803.pdf most likely)
148 MachineFunction &MF = DAG.getMachineFunction();
149 MachineFrameInfo *MFI = MF.getFrameInfo();
151 GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
152 SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
153 RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
155 MachineBasicBlock& BB = MF.front();
157 unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
158 IA64::r36, IA64::r37, IA64::r38, IA64::r39};
160 unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
161 IA64::F12,IA64::F13,IA64::F14, IA64::F15};
163 unsigned argVreg[8];
164 unsigned argPreg[8];
165 unsigned argOpc[8];
167 unsigned used_FPArgs = 0; // how many FP args have been used so far?
169 unsigned ArgOffset = 0;
170 int count = 0;
172 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
174 SDOperand newroot, argt;
175 if(count < 8) { // need to fix this logic? maybe.
177 switch (getValueType(I->getType())) {
178 default:
179 assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
180 case MVT::f32:
181 // fixme? (well, will need to for weird FP structy stuff,
182 // see intel ABI docs)
183 case MVT::f64:
184 //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
185 MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
186 // floating point args go into f8..f15 as-needed, the increment
187 argVreg[count] = // is below..:
188 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
189 // FP args go into f8..f15 as needed: (hence the ++)
190 argPreg[count] = args_FP[used_FPArgs++];
191 argOpc[count] = IA64::FMOV;
192 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
193 MVT::f64);
194 if (I->getType() == Type::FloatTy)
195 argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt);
196 break;
197 case MVT::i1: // NOTE: as far as C abi stuff goes,
198 // bools are just boring old ints
199 case MVT::i8:
200 case MVT::i16:
201 case MVT::i32:
202 case MVT::i64:
203 //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
204 MF.addLiveIn(args_int[count]); // mark this register as liveIn
205 argVreg[count] =
206 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
207 argPreg[count] = args_int[count];
208 argOpc[count] = IA64::MOV;
209 argt = newroot =
210 DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
211 if ( getValueType(I->getType()) != MVT::i64)
212 argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
213 newroot);
214 break;
216 } else { // more than 8 args go into the frame
217 // Create the frame index object for this incoming parameter...
218 ArgOffset = 16 + 8 * (count - 8);
219 int FI = MFI->CreateFixedObject(8, ArgOffset);
221 // Create the SelectionDAG nodes corresponding to a load
222 //from this parameter
223 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
224 argt = newroot = DAG.getLoad(getValueType(I->getType()),
225 DAG.getEntryNode(), FIN, DAG.getSrcValue(NULL));
227 ++count;
228 DAG.setRoot(newroot.getValue(1));
229 ArgValues.push_back(argt);
233 // Create a vreg to hold the output of (what will become)
234 // the "alloc" instruction
235 VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
236 BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
237 // we create a PSEUDO_ALLOC (pseudo)instruction for now
239 BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
241 // hmm:
242 BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
243 BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
244 // ..hmm.
246 BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
248 // hmm:
249 BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
250 BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
251 // ..hmm.
254 unsigned tempOffset=0;
256 // if this is a varargs function, we simply lower llvm.va_start by
257 // pointing to the first entry
258 if(F.isVarArg()) {
259 tempOffset=0;
260 VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
263 // here we actually do the moving of args, and store them to the stack
264 // too if this is a varargs function:
265 for (int i = 0; i < count && i < 8; ++i) {
266 BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
267 if(F.isVarArg()) {
268 // if this is a varargs function, we copy the input registers to the stack
269 int FI = MFI->CreateFixedObject(8, tempOffset);
270 tempOffset+=8; //XXX: is it safe to use r22 like this?
271 BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
272 // FIXME: we should use st8.spill here, one day
273 BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);
277 // Finally, inform the code generator which regs we return values in.
278 // (see the ISD::RET: case in the instruction selector)
279 switch (getValueType(F.getReturnType())) {
280 default: assert(0 && "i have no idea where to return this type!");
281 case MVT::isVoid: break;
282 case MVT::i1:
283 case MVT::i8:
284 case MVT::i16:
285 case MVT::i32:
286 case MVT::i64:
287 MF.addLiveOut(IA64::r8);
288 break;
289 case MVT::f32:
290 case MVT::f64:
291 MF.addLiveOut(IA64::F8);
292 break;
295 return ArgValues;
298 std::pair<SDOperand, SDOperand>
299 IA64TargetLowering::LowerCallTo(SDOperand Chain,
300 const Type *RetTy, bool isVarArg,
301 unsigned CallingConv, bool isTailCall,
302 SDOperand Callee, ArgListTy &Args,
303 SelectionDAG &DAG) {
305 MachineFunction &MF = DAG.getMachineFunction();
307 unsigned NumBytes = 16;
308 unsigned outRegsUsed = 0;
310 if (Args.size() > 8) {
311 NumBytes += (Args.size() - 8) * 8;
312 outRegsUsed = 8;
313 } else {
314 outRegsUsed = Args.size();
317 // FIXME? this WILL fail if we ever try to pass around an arg that
318 // consumes more than a single output slot (a 'real' double, int128
319 // some sort of aggregate etc.), as we'll underestimate how many 'outX'
320 // registers we use. Hopefully, the assembler will notice.
321 MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
322 std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
324 // keep stack frame 16-byte aligned
325 //assert(NumBytes==((NumBytes+15) & ~15) && "stack frame not 16-byte aligned!");
326 NumBytes = (NumBytes+15) & ~15;
328 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
330 SDOperand StackPtr, NullSV;
331 std::vector<SDOperand> Stores;
332 std::vector<SDOperand> Converts;
333 std::vector<SDOperand> RegValuesToPass;
334 unsigned ArgOffset = 16;
336 for (unsigned i = 0, e = Args.size(); i != e; ++i)
338 SDOperand Val = Args[i].first;
339 MVT::ValueType ObjectVT = Val.getValueType();
340 SDOperand ValToStore(0, 0), ValToConvert(0, 0);
341 unsigned ObjSize=8;
342 switch (ObjectVT) {
343 default: assert(0 && "unexpected argument type!");
344 case MVT::i1:
345 case MVT::i8:
346 case MVT::i16:
347 case MVT::i32:
348 //promote to 64-bits, sign/zero extending based on type
349 //of the argument
350 if(Args[i].second->isSigned())
351 Val = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64, Val);
352 else
353 Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64, Val);
354 // XXX: fall through
355 case MVT::i64:
356 //ObjSize = 8;
357 if(RegValuesToPass.size() >= 8) {
358 ValToStore = Val;
359 } else {
360 RegValuesToPass.push_back(Val);
362 break;
363 case MVT::f32:
364 //promote to 64-bits
365 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
366 // XXX: fall through
367 case MVT::f64:
368 if(RegValuesToPass.size() >= 8) {
369 ValToStore = Val;
370 } else {
371 RegValuesToPass.push_back(Val);
372 if(1 /* TODO: if(calling external or varadic function)*/ ) {
373 ValToConvert = Val; // additionally pass this FP value as an int
376 break;
379 if(ValToStore.Val) {
380 if(!StackPtr.Val) {
381 StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
382 NullSV = DAG.getSrcValue(NULL);
384 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
385 PtrOff = DAG.getNode(ISD::ADD, MVT::i64, StackPtr, PtrOff);
386 Stores.push_back(DAG.getNode(ISD::STORE, MVT::Other, Chain,
387 ValToStore, PtrOff, NullSV));
388 ArgOffset += ObjSize;
391 if(ValToConvert.Val) {
392 Converts.push_back(DAG.getNode(IA64ISD::GETFD, MVT::i64, ValToConvert));
396 // Emit all stores, make sure they occur before any copies into physregs.
397 if (!Stores.empty())
398 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, Stores);
400 static const unsigned IntArgRegs[] = {
401 IA64::out0, IA64::out1, IA64::out2, IA64::out3,
402 IA64::out4, IA64::out5, IA64::out6, IA64::out7
405 static const unsigned FPArgRegs[] = {
406 IA64::F8, IA64::F9, IA64::F10, IA64::F11,
407 IA64::F12, IA64::F13, IA64::F14, IA64::F15
410 SDOperand InFlag;
412 // save the current GP, SP and RP : FIXME: do we need to do all 3 always?
413 SDOperand GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag);
414 Chain = GPBeforeCall.getValue(1);
415 InFlag = Chain.getValue(2);
416 SDOperand SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag);
417 Chain = SPBeforeCall.getValue(1);
418 InFlag = Chain.getValue(2);
419 SDOperand RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag);
420 Chain = RPBeforeCall.getValue(1);
421 InFlag = Chain.getValue(2);
423 // Build a sequence of copy-to-reg nodes chained together with token chain
424 // and flag operands which copy the outgoing integer args into regs out[0-7]
425 // mapped 1:1 and the FP args into regs F8-F15 "lazily"
426 // TODO: for performance, we should only copy FP args into int regs when we
427 // know this is required (i.e. for varardic or external (unknown) functions)
429 // first to the FP->(integer representation) conversions, these are
430 // flagged for now, but shouldn't have to be (TODO)
431 unsigned seenConverts = 0;
432 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
433 if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) {
434 Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++], InFlag);
435 InFlag = Chain.getValue(1);
439 // next copy args into the usual places, these are flagged
440 unsigned usedFPArgs = 0;
441 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
442 Chain = DAG.getCopyToReg(Chain,
443 MVT::isInteger(RegValuesToPass[i].getValueType()) ?
444 IntArgRegs[i] : FPArgRegs[usedFPArgs++],
445 RegValuesToPass[i], InFlag);
446 InFlag = Chain.getValue(1);
449 // If the callee is a GlobalAddress node (quite common, every direct call is)
450 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
452 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
453 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
457 std::vector<MVT::ValueType> NodeTys;
458 std::vector<SDOperand> CallOperands;
459 NodeTys.push_back(MVT::Other); // Returns a chain
460 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
461 CallOperands.push_back(Chain);
462 CallOperands.push_back(Callee);
464 // emit the call itself
465 if (InFlag.Val)
466 CallOperands.push_back(InFlag);
467 else
468 assert(0 && "this should never happen!\n");
470 // to make way for a hack:
471 Chain = DAG.getNode(IA64ISD::BRCALL, NodeTys, CallOperands);
472 InFlag = Chain.getValue(1);
474 // restore the GP, SP and RP after the call
475 Chain = DAG.getCopyToReg(Chain, IA64::r1, GPBeforeCall, InFlag);
476 InFlag = Chain.getValue(1);
477 Chain = DAG.getCopyToReg(Chain, IA64::r12, SPBeforeCall, InFlag);
478 InFlag = Chain.getValue(1);
479 Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag);
480 InFlag = Chain.getValue(1);
482 std::vector<MVT::ValueType> RetVals;
483 RetVals.push_back(MVT::Other);
484 RetVals.push_back(MVT::Flag);
486 MVT::ValueType RetTyVT = getValueType(RetTy);
487 SDOperand RetVal;
488 if (RetTyVT != MVT::isVoid) {
489 switch (RetTyVT) {
490 default: assert(0 && "Unknown value type to return!");
491 case MVT::i1: { // bools are just like other integers (returned in r8)
492 // we *could* fall through to the truncate below, but this saves a
493 // few redundant predicate ops
494 SDOperand boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
495 InFlag = boolInR8.getValue(2);
496 Chain = boolInR8.getValue(1);
497 SDOperand zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag);
498 InFlag = zeroReg.getValue(2);
499 Chain = zeroReg.getValue(1);
501 RetVal = DAG.getSetCC(MVT::i1, boolInR8, zeroReg, ISD::SETNE);
502 break;
504 case MVT::i8:
505 case MVT::i16:
506 case MVT::i32:
507 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
508 Chain = RetVal.getValue(1);
510 // keep track of whether it is sign or zero extended (todo: bools?)
511 /* XXX
512 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
513 MVT::i64, RetVal, DAG.getValueType(RetTyVT));
515 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
516 break;
517 case MVT::i64:
518 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
519 Chain = RetVal.getValue(1);
520 InFlag = RetVal.getValue(2); // XXX dead
521 break;
522 case MVT::f32:
523 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
524 Chain = RetVal.getValue(1);
525 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::f32, RetVal);
526 break;
527 case MVT::f64:
528 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
529 Chain = RetVal.getValue(1);
530 InFlag = RetVal.getValue(2); // XXX dead
531 break;
535 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
536 DAG.getConstant(NumBytes, getPointerTy()));
538 return std::make_pair(RetVal, Chain);
541 std::pair<SDOperand, SDOperand> IA64TargetLowering::
542 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
543 SelectionDAG &DAG) {
544 assert(0 && "LowerFrameReturnAddress unimplemented");
545 abort();
548 SDOperand IA64TargetLowering::
549 LowerOperation(SDOperand Op, SelectionDAG &DAG) {
550 switch (Op.getOpcode()) {
551 default: assert(0 && "Should not custom lower this!");
552 case ISD::RET: {
553 SDOperand AR_PFSVal, Copy;
555 switch(Op.getNumOperands()) {
556 default:
557 assert(0 && "Do not know how to return this many arguments!");
558 abort();
559 case 1:
560 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
561 AR_PFSVal = DAG.getCopyToReg(AR_PFSVal.getValue(1), IA64::AR_PFS,
562 AR_PFSVal);
563 return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal);
564 case 2: {
565 // Copy the result into the output register & restore ar.pfs
566 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
567 unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8;
569 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
570 Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1),
571 SDOperand());
572 AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), IA64::AR_PFS, AR_PFSVal,
573 Copy.getValue(1));
574 std::vector<MVT::ValueType> NodeTys;
575 std::vector<SDOperand> RetOperands;
576 NodeTys.push_back(MVT::Other);
577 NodeTys.push_back(MVT::Flag);
578 RetOperands.push_back(AR_PFSVal);
579 RetOperands.push_back(AR_PFSVal.getValue(1));
580 return DAG.getNode(IA64ISD::RET_FLAG, NodeTys, RetOperands);
583 return SDOperand();
585 case ISD::VAARG: {
586 MVT::ValueType VT = getPointerTy();
587 SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1),
588 Op.getOperand(2));
589 // Increment the pointer, VAList, to the next vaarg
590 SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList,
591 DAG.getConstant(MVT::getSizeInBits(VT)/8,
592 VT));
593 // Store the incremented VAList to the legalized pointer
594 VAIncr = DAG.getNode(ISD::STORE, MVT::Other, VAList.getValue(1), VAIncr,
595 Op.getOperand(1), Op.getOperand(2));
596 // Load the actual argument out of the pointer VAList
597 return DAG.getLoad(Op.getValueType(), VAIncr, VAList, DAG.getSrcValue(0));
599 case ISD::VASTART: {
600 // vastart just stores the address of the VarArgsFrameIndex slot into the
601 // memory location argument.
602 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
603 return DAG.getNode(ISD::STORE, MVT::Other, Op.getOperand(0), FR,
604 Op.getOperand(1), Op.getOperand(2));