llvm.org GIT mirror llvm / 1f595bb
Use CallConvLower.h and TableGen descriptions of the calling conventions for ARM. Patch by Sandeep Patel.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@69371 91177308-0d34-0410-b5e6-96231b3b80d8

Bob Wilson, 10 years ago
27 changed file(s) with 760 addition(s) and 321 deletion(s).
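At a glance, the patch swaps the hand-rolled HowToPassArgument logic in ARMISelLowering.cpp for TableGen-generated calling-convention code. A rough sketch of the new flow (illustrative only; the concrete pieces are the ARMCallingConv.td, CMakeLists.txt, Makefile, and ARMISelLowering.cpp hunks below):

// ARMCallingConv.td is fed to TableGen with -gen-callingconv, producing
// ARMGenCallingConv.inc, which defines CC_ARM, RetCC_ARM and the per-ABI
// APCS/AAPCS variants:
//
//   ARMCallingConv.td  --(tblgen -gen-callingconv)-->  ARMGenCallingConv.inc
//
// ARMISelLowering.cpp then includes the generated functions and drives
// argument/return lowering through CCState instead of hand-written code:
#include "ARMGenCallingConv.inc"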
3131 Full, // The value fills the full location.
3232 SExt, // The value is sign extended in the location.
3333 ZExt, // The value is zero extended in the location.
34 AExt // The value is extended with undefined upper bits.
34 AExt, // The value is extended with undefined upper bits.
35 BCvt // The value is bit-converted in the location
3536 // TODO: a subset of the value is in the location.
3637 };
3738 private:
4445 /// isMem - True if this is a memory loc, false if it is a register loc.
4546 bool isMem : 1;
4647
48 /// isCustom - True if this arg/retval requires special handling
49 bool isCustom : 1;
50
4751 /// Information about how the value is assigned.
48 LocInfo HTP : 7;
52 LocInfo HTP : 6;
4953
5054 /// ValVT - The type of the value being assigned.
5155 MVT ValVT;
6165 Ret.ValNo = ValNo;
6266 Ret.Loc = RegNo;
6367 Ret.isMem = false;
68 Ret.isCustom = false;
6469 Ret.HTP = HTP;
6570 Ret.ValVT = ValVT;
6671 Ret.LocVT = LocVT;
6772 return Ret;
6873 }
74
75 static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
76 unsigned RegNo, MVT LocVT,
77 LocInfo HTP) {
78 CCValAssign Ret;
79 Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
80 Ret.isCustom = true;
81 return Ret;
82 }
83
6984 static CCValAssign getMem(unsigned ValNo, MVT ValVT,
7085 unsigned Offset, MVT LocVT,
7186 LocInfo HTP) {
7388 Ret.ValNo = ValNo;
7489 Ret.Loc = Offset;
7590 Ret.isMem = true;
91 Ret.isCustom = false;
7692 Ret.HTP = HTP;
7793 Ret.ValVT = ValVT;
7894 Ret.LocVT = LocVT;
7995 return Ret;
8096 }
8197
98 static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
99 unsigned Offset, MVT LocVT,
100 LocInfo HTP) {
101 CCValAssign Ret;
102 Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
103 Ret.isCustom = true;
104 return Ret;
105 }
106
82107 unsigned getValNo() const { return ValNo; }
83108 MVT getValVT() const { return ValVT; }
84109
85110 bool isRegLoc() const { return !isMem; }
86111 bool isMemLoc() const { return isMem; }
112
113 bool needsCustom() const { return isCustom; }
87114
88115 unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
89116 unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
91118
92119 LocInfo getLocInfo() const { return HTP; }
93120 };
94
95121
96122 /// CCAssignFn - This function assigns a location for Val, updating State to
97123 /// reflect the change.
99125 MVT LocVT, CCValAssign::LocInfo LocInfo,
100126 ISD::ArgFlagsTy ArgFlags, CCState &State);
101127
102
128 /// CCCustomFn - This function assigns a location for Val, possibly updating
129 /// all args to reflect the changes, and indicates whether it handled the arg.
130 /// If it handles the arg, it must mark the locations it adds as custom and return true.
131 typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
132 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
133 ISD::ArgFlagsTy &ArgFlags, CCState &State);
134
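For illustration, a hypothetical handler matching the CCCustomFn signature (not part of this patch; the register choice and logic are invented) claims a value by adding custom locations and returning true, or declines by returning false:

// Hypothetical CCCustomFn: take the value in R0 as a custom location if R0
// is still free; otherwise decline and let the following actions handle it.
static bool CC_Example_Custom(unsigned &ValNo, MVT &ValVT,
                              MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                              ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  if (unsigned Reg = State.AllocateReg(ARM::R0)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;   // handled: the locations added are marked custom
  }
  return false;    // not handled
}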
103135 /// CCState - This class holds information needed while lowering arguments and
104136 /// return values. It captures which registers are already assigned and which
105137 /// stack slots are used. It provides accessors to allocate these values.
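A minimal sketch of how CCState is driven (assuming the LLVM 2.5-era API used in this patch; TM and TheCall stand in for a TargetMachine and the call node). The real uses appear in the ARMISelLowering.cpp hunks further down:

// Analyze the outgoing arguments of a call with a generated CCAssignFn,
// then walk the assigned locations.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallingConv::C, /*isVarArg=*/false, TM, ArgLocs);
CCInfo.AnalyzeCallOperands(TheCall, CC_ARM);

for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
  CCValAssign &VA = ArgLocs[i];
  if (VA.isRegLoc()) {
    // copy the argument into VA.getLocReg()
  } else {
    // store the argument at SP + VA.getLocMemOffset()
  }
}
unsigned NumBytes = CCInfo.getNextStackOffset();  // stack space the args need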
1313
1414 class CCAction;
1515 class CallingConv;
16
17 /// CCCustom - Calls a custom arg handling function.
18 class CCCustom : CCAction {
19 string FuncName = fn;
20 }
1621
1722 /// CCPredicateAction - Instances of this class check some predicate, then
1823 /// delegate to another action if the predicate is true.
8994 ValueType DestTy = destTy;
9095 }
9196
97 /// CCBitConvertToType - If applied, this bitconverts the specified current
98 /// value to the specified type.
99 class CCBitConvertToType<ValueType destTy> : CCAction {
100 ValueType DestTy = destTy;
101 }
102
92103 /// CCDelegateTo - This action invokes the specified sub-calling-convention. It
93104 /// is successful if the specified CC matches.
94105 class CCDelegateTo : CCAction {
8989
9090 include "ARMRegisterInfo.td"
9191
92 include "ARMCallingConv.td"
93
9294 //===----------------------------------------------------------------------===//
9395 // Instruction Descriptions
9496 //===----------------------------------------------------------------------===//
0 //===- ARMCallingConv.td - Calling Conventions for ARM ----------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 // This describes the calling conventions for the ARM architecture.
9 //===----------------------------------------------------------------------===//
10
11 /// CCIfSubtarget - Match if the current subtarget has a feature F.
12 class CCIfSubtarget<string F, CCAction A>:
13   CCIf<!strconcat("State.getTarget().getSubtarget<ARMSubtarget>().", F), A>;
14
15 /// CCIfAlign - Match if the original alignment of the arg is the given value.
16 class CCIfAlign<string Align, CCAction A>:
17   CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;
18
19 //===----------------------------------------------------------------------===//
20 // ARM APCS Calling Convention
21 //===----------------------------------------------------------------------===//
22 def CC_ARM_APCS : CallingConv<[
23
24 CCIfType<[i8, i16], CCPromoteToType<i32>>,
25
26 // f64 is passed in pairs of GPRs, possibly split onto the stack
27 CCIfType<[f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
28
29 CCIfType<[f32], CCBitConvertToType<i32>>,
30 CCIfType<[i32, f32], CCAssignToReg<[R0, R1, R2, R3]>>,
31
32 CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
33 CCIfType<[f64], CCAssignToStack<8, 4>>
34 ]>;
35
36 def RetCC_ARM_APCS : CallingConv<[
37 CCIfType<[f32], CCBitConvertToType<i32>>,
38 CCIfType<[f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
39
40 CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
41 CCIfType<[i64], CCAssignToRegWithShadow<[R0, R2], [R1, R3]>>
42 ]>;
43
44 //===----------------------------------------------------------------------===//
45 // ARM AAPCS (EABI) Calling Convention
46 //===----------------------------------------------------------------------===//
47 def CC_ARM_AAPCS : CallingConv<[
48
49 CCIfType<[i8, i16], CCPromoteToType<i32>>,
50
51 // i64/f64 is passed in even pairs of GPRs
52 // i64 is 8-aligned i32 here, so we may need to eat R1 as a pad register
53 CCIfType<[i32], CCIfAlign<"8", CCAssignToRegWithShadow<[R0, R2], [R0, R1]>>>,
54 CCIfType<[f64], CCCustom<"CC_ARM_AAPCS_Custom_f64">>,
55
56 CCIfType<[f32], CCBitConvertToType<i32>>,
57 CCIfType<[i32, f32], CCAssignToReg<[R0, R1, R2, R3]>>,
58
59 CCIfType<[i32, f32], CCAssignToStack<4, 4>>,
60 CCIfType<[f64], CCAssignToStack<8, 8>>
61 ]>;
62
63 def RetCC_ARM_AAPCS : CallingConv<[
64 CCIfType<[f32], CCBitConvertToType<i32>>,
65 CCIfType<[f64], CCCustom<"RetCC_ARM_AAPCS_Custom_f64">>,
66
67 CCIfType<[i32], CCAssignToReg<[R0, R1, R2, R3]>>,
68 CCIfType<[i64], CCAssignToRegWithShadow<[R0, R2], [R1, R3]>>
69 ]>;
70
71 //===----------------------------------------------------------------------===//
72 // ARM Calling Convention Dispatch
73 //===----------------------------------------------------------------------===//
74
75 def CC_ARM : CallingConv<[
76 CCIfSubtarget<"isAAPCS_ABI()", CCDelegateTo<CC_ARM_AAPCS>>,
77 CCDelegateTo<CC_ARM_APCS>
78 ]>;
79
80 def RetCC_ARM : CallingConv<[
81 CCIfSubtarget<"isAAPCS_ABI()", CCDelegateTo<RetCC_ARM_AAPCS>>,
82 CCDelegateTo<RetCC_ARM_APCS>
83 ]>;
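For orientation, the dispatch that -gen-callingconv emits for the CC_ARM definition above has roughly this shape (an approximation of ARMGenCallingConv.inc, not its literal contents; returning false means a location was assigned):

static bool CC_ARM(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo,
                   ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // CCIfSubtarget<"isAAPCS_ABI()", CCDelegateTo<CC_ARM_AAPCS>>
  if (State.getTarget().getSubtarget<ARMSubtarget>().isAAPCS_ABI()) {
    if (!CC_ARM_AAPCS(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }
  // CCDelegateTo<CC_ARM_APCS>
  if (!CC_ARM_APCS(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
    return false;
  return true;  // no rule matched
}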
2121 #include "ARMTargetMachine.h"
2222 #include "llvm/CallingConv.h"
2323 #include "llvm/Constants.h"
24 #include "llvm/Function.h"
2425 #include "llvm/Instruction.h"
2526 #include "llvm/Intrinsics.h"
2627 #include "llvm/GlobalValue.h"
28 #include "llvm/CodeGen/CallingConvLower.h"
2729 #include "llvm/CodeGen/MachineBasicBlock.h"
2830 #include "llvm/CodeGen/MachineFrameInfo.h"
2931 #include "llvm/CodeGen/MachineFunction.h"
3032 #include "llvm/CodeGen/MachineInstrBuilder.h"
3133 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/CodeGen/PseudoSourceValue.h"
3235 #include "llvm/CodeGen/SelectionDAG.h"
3336 #include "llvm/Target/TargetOptions.h"
3437 #include "llvm/ADT/VectorExtras.h"
3538 #include "llvm/Support/MathExtras.h"
3639 using namespace llvm;
40
41 static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
42 MVT &LocVT,
43 CCValAssign::LocInfo &LocInfo,
44 ISD::ArgFlagsTy &ArgFlags,
45 CCState &State);
46 static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
47 MVT &LocVT,
48 CCValAssign::LocInfo &LocInfo,
49 ISD::ArgFlagsTy &ArgFlags,
50 CCState &State);
51 static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
52 MVT &LocVT,
53 CCValAssign::LocInfo &LocInfo,
54 ISD::ArgFlagsTy &ArgFlags,
55 CCState &State);
56 static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
57 MVT &LocVT,
58 CCValAssign::LocInfo &LocInfo,
59 ISD::ArgFlagsTy &ArgFlags,
60 CCState &State);
3761
3862 ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
3963 : TargetLowering(TM), ARMPCLabelIndex(0) {
360384 return Invert;
361385 }
362386
363 static void
364 HowToPassArgument(MVT ObjectVT, unsigned NumGPRs,
365 unsigned StackOffset, unsigned &NeededGPRs,
366 unsigned &NeededStackSize, unsigned &GPRPad,
367 unsigned &StackPad, ISD::ArgFlagsTy Flags) {
368 NeededStackSize = 0;
369 NeededGPRs = 0;
370 StackPad = 0;
371 GPRPad = 0;
372 unsigned align = Flags.getOrigAlign();
373 GPRPad = NumGPRs % ((align + 3)/4);
374 StackPad = StackOffset % align;
375 unsigned firstGPR = NumGPRs + GPRPad;
376 switch (ObjectVT.getSimpleVT()) {
377 default: assert(0 && "Unhandled argument type!");
378 case MVT::i32:
379 case MVT::f32:
380 if (firstGPR < 4)
381 NeededGPRs = 1;
387 //===----------------------------------------------------------------------===//
388 // Calling Convention Implementation
389 //
390 // The lowering operations for the calling convention are handled in this order:
391 // LowerCALL (virt regs --> phys regs, virt regs --> stack)
392 // LowerFORMAL_ARGUMENTS (phys --> virt regs, stack --> virt regs)
393 // LowerRET (virt regs --> phys regs)
394 // LowerCALL (phys regs --> virt regs)
395 //
396 //===----------------------------------------------------------------------===//
397
398 #include "ARMGenCallingConv.inc"
399
400 // APCS f64 is in register pairs, possibly split to stack
401 static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
402 MVT &LocVT,
403 CCValAssign::LocInfo &LocInfo,
404 ISD::ArgFlagsTy &ArgFlags,
405 CCState &State) {
406 static const unsigned HiRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
407 static const unsigned LoRegList[] = { ARM::R1,
408 ARM::R2,
409 ARM::R3,
410 ARM::NoRegister };
411
412 if (unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 4)) {
413 unsigned i;
414 for (i = 0; i < 4; ++i)
415 if (HiRegList[i] == Reg)
416 break;
417
418 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
419 MVT::i32, LocInfo));
420 if (LoRegList[i] != ARM::NoRegister)
421 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
422 MVT::i32, LocInfo));
382423 else
383 NeededStackSize = 4;
384 break;
385 case MVT::i64:
386 case MVT::f64:
387 if (firstGPR < 3)
388 NeededGPRs = 2;
389 else if (firstGPR == 3) {
390 NeededGPRs = 1;
391 NeededStackSize = 4;
392 } else
393 NeededStackSize = 8;
394 }
424 State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
425 State.AllocateStack(4, 4),
426 MVT::i32, LocInfo));
427 return true; // we handled it
428 }
429
430 return false; // we didn't handle it
431 }
432
433 // AAPCS f64 is in aligned register pairs
434 static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
435 MVT &LocVT,
436 CCValAssign::LocInfo &LocInfo,
437 ISD::ArgFlagsTy &ArgFlags,
438 CCState &State) {
439 static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
440 static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
441
442 if (unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2)) {
443 unsigned i;
444 for (i = 0; i < 2; ++i)
445 if (HiRegList[i] == Reg)
446 break;
447
448 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
449 MVT::i32, LocInfo));
450 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
451 MVT::i32, LocInfo));
452 return true; // we handled it
453 }
454
455 return false; // we didn't handle it
456 }
457
458 static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
459 MVT &LocVT,
460 CCValAssign::LocInfo &LocInfo,
461 ISD::ArgFlagsTy &ArgFlags,
462 CCState &State) {
463 static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
464 static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
465
466 if (unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2)) {
467 unsigned i;
468 for (i = 0; i < 2; ++i)
469 if (HiRegList[i] == Reg)
470 break;
471
472 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
473 MVT::i32, LocInfo));
474 State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
475 MVT::i32, LocInfo));
476 return true; // we handled it
477 }
478
479 return false; // we didn't handle it
480 }
481
482 static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT,
483 MVT &LocVT,
484 CCValAssign::LocInfo &LocInfo,
485 ISD::ArgFlagsTy &ArgFlags,
486 CCState &State) {
487 return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
488 State);
489 }
490
491 /// AddLiveIn - This helper function adds the specified physical register to the
492 /// MachineFunction as a live in value. It also creates a corresponding virtual
493 /// register for it.
494 static unsigned AddLiveIn(MachineFunction &MF, unsigned PReg,
495 const TargetRegisterClass *RC) {
496 assert(RC->contains(PReg) && "Not the correct regclass!");
497 unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
498 MF.getRegInfo().addLiveIn(PReg, VReg);
499 return VReg;
500 }
501
502 /// LowerCallResult - Lower the result values of an ISD::CALL into the
503 /// appropriate copies out of appropriate physical registers. This assumes that
504 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
505 /// being lowered. This returns an SDNode with the same number of values as the
506 /// ISD::CALL.
507 SDNode *ARMTargetLowering::
508 LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
509 unsigned CallingConv, SelectionDAG &DAG) {
510
511 DebugLoc dl = TheCall->getDebugLoc();
512 // Assign locations to each value returned by this call.
513 SmallVector<CCValAssign, 16> RVLocs;
514 bool isVarArg = TheCall->isVarArg();
515 CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
516 CCInfo.AnalyzeCallResult(TheCall, RetCC_ARM);
517
518 SmallVector<SDValue, 8> ResultVals;
519
520 // Copy all of the result registers out of their specified physreg.
521 for (unsigned i = 0; i != RVLocs.size(); ++i) {
522 CCValAssign VA = RVLocs[i];
523
524 // handle f64 as custom
525 if (VA.needsCustom()) {
526 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
527 InFlag);
528 VA = RVLocs[++i]; // skip ahead to next loc
529 SDValue Hi = DAG.getCopyFromReg(Lo, dl, VA.getLocReg(), VA.getLocVT(),
530 Lo.getValue(2));
531 ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, dl, VA.getValVT(), Lo,
532 Hi));
533 } else {
534 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
535 InFlag).getValue(1);
536 SDValue Val = Chain.getValue(0);
537 InFlag = Chain.getValue(2);
538
539 switch (VA.getLocInfo()) {
540 default: assert(0 && "Unknown loc info!");
541 case CCValAssign::Full: break;
542 case CCValAssign::BCvt:
543 Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(),
544 Chain.getValue(0));
545 break;
546 }
547
548 ResultVals.push_back(Val);
549 }
550 }
551
552 // Merge everything together with a MERGE_VALUES node.
553 ResultVals.push_back(Chain);
554 return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
555 &ResultVals[0], ResultVals.size()).getNode();
556 }
557
558 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
559 /// by "Src" to address "Dst" of size "Size". Alignment information is
560 /// specified by the specific parameter attribute. The copy will be passed as
561 /// a byval function parameter.
562 /// Sometimes what we are copying is the end of a larger object, the part that
563 /// does not fit in registers.
564 static SDValue
565 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
566 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
567 DebugLoc dl) {
568 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
569 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
570 /*AlwaysInline=*/false, NULL, 0, NULL, 0);
571 }
572
573 /// LowerMemOpCallTo - Store the argument to the stack
574 SDValue
575 ARMTargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
576 const SDValue &StackPtr,
577 const CCValAssign &VA,
578 SDValue Chain,
579 SDValue Arg, ISD::ArgFlagsTy Flags) {
580 DebugLoc dl = TheCall->getDebugLoc();
581 unsigned LocMemOffset = VA.getLocMemOffset();
582 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
583 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
584 if (Flags.isByVal()) {
585 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
586 }
587 return DAG.getStore(Chain, dl, Arg, PtrOff,
588 PseudoSourceValue::getStack(), LocMemOffset);
395589 }
396590
397591 /// LowerCALL - Lowering a ISD::CALL node into a callseq_start <-
399593 /// nodes.
400594 SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
401595 CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
402 MVT RetVT = TheCall->getRetValType(0);
403 SDValue Chain = TheCall->getChain();
404 assert((TheCall->getCallingConv() == CallingConv::C ||
405 TheCall->getCallingConv() == CallingConv::Fast) &&
406 "unknown calling convention");
407 SDValue Callee = TheCall->getCallee();
408 unsigned NumOps = TheCall->getNumArgs();
409 DebugLoc dl = TheCall->getDebugLoc();
410 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
411 unsigned NumGPRs = 0; // GPRs used for parameter passing.
412
413 // Count how many bytes are to be pushed on the stack.
414 unsigned NumBytes = 0;
415
416 // Add up all the space actually used.
417 for (unsigned i = 0; i < NumOps; ++i) {
418 unsigned ObjSize;
419 unsigned ObjGPRs;
420 unsigned StackPad;
421 unsigned GPRPad;
422 MVT ObjectVT = TheCall->getArg(i).getValueType();
423 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
424 HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize,
425 GPRPad, StackPad, Flags);
426 NumBytes += ObjSize + StackPad;
427 NumGPRs += ObjGPRs + GPRPad;
428 }
596 MVT RetVT = TheCall->getRetValType(0);
597 SDValue Chain = TheCall->getChain();
598 unsigned CC = TheCall->getCallingConv();
599 assert((CC == CallingConv::C ||
600 CC == CallingConv::Fast) && "unknown calling convention");
601 bool isVarArg = TheCall->isVarArg();
602 SDValue Callee = TheCall->getCallee();
603 DebugLoc dl = TheCall->getDebugLoc();
604
605 // Analyze operands of the call, assigning locations to each operand.
606 SmallVector<CCValAssign, 16> ArgLocs;
607 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
608 CCInfo.AnalyzeCallOperands(TheCall, CC_ARM);
609
610 // Get a count of how many bytes are to be pushed on the stack.
611 unsigned NumBytes = CCInfo.getNextStackOffset();
429612
430613 // Adjust the stack pointer for the new arguments...
431614 // These operations are automatically eliminated by the prolog/epilog pass
433616
434617 SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);
435618
436 static const unsigned GPRArgRegs[] = {
437 ARM::R0, ARM::R1, ARM::R2, ARM::R3
438 };
439
440 NumGPRs = 0;
441 std::vector<std::pair<unsigned, SDValue> > RegsToPass;
442 std::vector<SDValue> MemOpChains;
443 for (unsigned i = 0; i != NumOps; ++i) {
444 SDValue Arg = TheCall->getArg(i);
445 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
446 MVT ArgVT = Arg.getValueType();
447
448 unsigned ObjSize;
449 unsigned ObjGPRs;
450 unsigned GPRPad;
451 unsigned StackPad;
452 HowToPassArgument(ArgVT, NumGPRs, ArgOffset, ObjGPRs,
453 ObjSize, GPRPad, StackPad, Flags);
454 NumGPRs += GPRPad;
455 ArgOffset += StackPad;
456 if (ObjGPRs > 0) {
457 switch (ArgVT.getSimpleVT()) {
458 default: assert(0 && "Unexpected ValueType for argument!");
459 case MVT::i32:
460 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg));
461 break;
462 case MVT::f32:
463 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs],
464 DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg)));
465 break;
466 case MVT::i64: {
467 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
468 DAG.getConstant(0, getPointerTy()));
469 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
470 DAG.getConstant(1, getPointerTy()));
471 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Lo));
472 if (ObjGPRs == 2)
473 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], Hi));
474 else {
475 SDValue PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType());
476 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
477 MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff, NULL, 0));
478 }
479 break;
619 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
620 SmallVector<SDValue, 8> MemOpChains;
621
622 // Walk the register/memloc assignments, inserting copies/loads. In the case
623 // of tail call optimization, arguments are handled later.
624 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
625 i != e;
626 ++i, ++realArgIdx) {
627 CCValAssign &VA = ArgLocs[i];
628 SDValue Arg = TheCall->getArg(realArgIdx);
629 ISD::ArgFlagsTy Flags = TheCall->getArgFlags(realArgIdx);
630
631 // Promote the value if needed.
632 switch (VA.getLocInfo()) {
633 default: assert(0 && "Unknown loc info!");
634 case CCValAssign::Full: break;
635 case CCValAssign::SExt:
636 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
637 break;
638 case CCValAssign::ZExt:
639 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
640 break;
641 case CCValAssign::AExt:
642 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
643 break;
644 case CCValAssign::BCvt:
645 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
646 break;
647 }
648
649 // f64 is passed in i32 pairs and must be combined
650 if (VA.needsCustom()) {
651 SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
652 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
653 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
654 VA = ArgLocs[++i]; // skip ahead to next loc
655 if (VA.isRegLoc())
656 RegsToPass.push_back(std::make_pair(VA.getLocReg(),
657 fmrrd.getValue(1)));
658 else {
659 assert(VA.isMemLoc());
660 if (StackPtr.getNode() == 0)
661 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
662
663 MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
664 Chain, fmrrd.getValue(1),
665 Flags));
480666 }
481 case MVT::f64: {
482 SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
483 DAG.getVTList(MVT::i32, MVT::i32),
484 &Arg, 1);
485 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Cvt));
486 if (ObjGPRs == 2)
487 RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1],
488 Cvt.getValue(1)));
489 else {
490 SDValue PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType());
491 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
492 MemOpChains.push_back(DAG.getStore(Chain, dl, Cvt.getValue(1), PtrOff,
493 NULL, 0));
494 }
495 break;
496 }
497 }
667 } else if (VA.isRegLoc()) {
668 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
498669 } else {
499 assert(ObjSize != 0);
500 SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
501 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
502 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
670 assert(VA.isMemLoc());
671 if (StackPtr.getNode() == 0)
672 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
673
674 MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
675 Chain, Arg, Flags));
503676 }
504
505 NumGPRs += ObjGPRs;
506 ArgOffset += ObjSize;
507677 }
508678
509679 if (!MemOpChains.empty())
609779 if (RetVT != MVT::Other)
610780 InFlag = Chain.getValue(1);
611781
612 std::vector<SDValue> ResultVals;
613
614 // If the call has results, copy the values out of the ret val registers.
615 switch (RetVT.getSimpleVT()) {
616 default: assert(0 && "Unexpected ret value!");
617 case MVT::Other:
618 break;
619 case MVT::i32:
620 Chain = DAG.getCopyFromReg(Chain, dl, ARM::R0,
621 MVT::i32, InFlag).getValue(1);
622 ResultVals.push_back(Chain.getValue(0));
623 if (TheCall->getNumRetVals() > 1 &&
624 TheCall->getRetValType(1) == MVT::i32) {
625 // Returns a i64 value.
626 Chain = DAG.getCopyFromReg(Chain, dl, ARM::R1, MVT::i32,
627 Chain.getValue(2)).getValue(1);
628 ResultVals.push_back(Chain.getValue(0));
629 }
630 break;
631 case MVT::f32:
632 Chain = DAG.getCopyFromReg(Chain, dl, ARM::R0,
633 MVT::i32, InFlag).getValue(1);
634 ResultVals.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32,
635 Chain.getValue(0)));
636 break;
637 case MVT::f64: {
638 SDValue Lo = DAG.getCopyFromReg(Chain, dl, ARM::R0, MVT::i32, InFlag);
639 SDValue Hi = DAG.getCopyFromReg(Lo, dl, ARM::R1, MVT::i32, Lo.getValue(2));
640 ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi));
641 break;
642 }
643 }
644
645 if (ResultVals.empty())
646 return Chain;
647
648 ResultVals.push_back(Chain);
649 SDValue Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size(), dl);
650 return Res.getValue(Op.getResNo());
651 }
652
653 static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
654 SDValue Copy;
782 // Handle result values, copying them out of physregs into vregs that we
783 // return.
784 return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
785 Op.getResNo());
786 }
787
788 SDValue ARMTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
789 // The chain is always operand #0
655790 SDValue Chain = Op.getOperand(0);
656791 DebugLoc dl = Op.getDebugLoc();
657 switch(Op.getNumOperands()) {
658 default:
659 assert(0 && "Do not know how to return this many arguments!");
660 abort();
661 case 1: {
662 SDValue LR = DAG.getRegister(ARM::LR, MVT::i32);
663 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
664 }
665 case 3:
666 Op = Op.getOperand(1);
667 if (Op.getValueType() == MVT::f32) {
668 Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
669 } else if (Op.getValueType() == MVT::f64) {
670 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
671 // available.
672 Op = DAG.getNode(ARMISD::FMRRD, dl,
673 DAG.getVTList(MVT::i32, MVT::i32), &Op,1);
674 SDValue Sign = DAG.getConstant(0, MVT::i32);
675 return DAG.getNode(ISD::RET, dl, MVT::Other, Chain, Op, Sign,
676 Op.getValue(1), Sign);
792
793 // CCValAssign - represent the assignment of
794 // the return value to a location
795 SmallVector<CCValAssign, 16> RVLocs;
796 unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
797 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
798
799 // CCState - Info about the registers and stack slot.
800 CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
801
802 // Analyze return values of ISD::RET
803 CCInfo.AnalyzeReturn(Op.getNode(), RetCC_ARM);
804
805 // If this is the first return lowered for this function, add
806 // the regs to the liveout set for the function.
807 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
808 for (unsigned i = 0; i != RVLocs.size(); ++i)
809 if (RVLocs[i].isRegLoc())
810 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
811 }
812
813 SDValue Flag;
814
815 // Copy the result values into the output registers.
816 for (unsigned i = 0, realRVLocIdx = 0;
817 i != RVLocs.size();
818 ++i, ++realRVLocIdx) {
819 CCValAssign &VA = RVLocs[i];
820 assert(VA.isRegLoc() && "Can only return in registers!");
821
822 // ISD::RET => ret chain, (regnum1,val1), ...
823 // So i*2+1 index only the regnums
824 SDValue Arg = Op.getOperand(realRVLocIdx*2+1);
825
826 switch (VA.getLocInfo()) {
827 default: assert(0 && "Unknown loc info!");
828 case CCValAssign::Full: break;
829 case CCValAssign::BCvt:
830 Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
831 break;
677832 }
678 Copy = DAG.getCopyToReg(Chain, dl, ARM::R0, Op, SDValue());
679 if (DAG.getMachineFunction().getRegInfo().liveout_empty())
680 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0);
681 break;
682 case 5:
683 Copy = DAG.getCopyToReg(Chain, dl, ARM::R1, Op.getOperand(3), SDValue());
684 Copy = DAG.getCopyToReg(Copy, dl, ARM::R0, Op.getOperand(1),
685 Copy.getValue(1));
686 // If we haven't noted the R0+R1 are live out, do so now.
687 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
688 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0);
689 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R1);
690 }
691 break;
692 case 9: // i128 -> 4 regs
693 Copy = DAG.getCopyToReg(Chain, dl, ARM::R3, Op.getOperand(7), SDValue());
694 Copy = DAG.getCopyToReg(Copy, dl, ARM::R2, Op.getOperand(5),
695 Copy.getValue(1));
696 Copy = DAG.getCopyToReg(Copy, dl, ARM::R1, Op.getOperand(3),
697 Copy.getValue(1));
698 Copy = DAG.getCopyToReg(Copy, dl, ARM::R0, Op.getOperand(1),
699 Copy.getValue(1));
700 // If we haven't noted the R0+R1 are live out, do so now.
701 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
702 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0);
703 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R1);
704 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R2);
705 DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R3);
706 }
707 break;
708
709 }
710
711 //We must use RET_FLAG instead of BRIND because BRIND doesn't have a flag
712 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Copy, Copy.getValue(1));
833
834 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
835 // available.
836 if (VA.needsCustom()) {
837 SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
838 DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
839 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
840 VA = RVLocs[++i]; // skip ahead to next loc
841 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
842 Flag);
843 } else
844 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
845
846 // guarantee that all emitted copies are
847 // stuck together, avoiding something bad
848 Flag = Chain.getValue(1);
849 }
850
851 SDValue result;
852 if (Flag.getNode())
853 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
854 else // Return Void
855 result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
856
857 return result;
713858 }
714859
715860 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
9321077 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
9331078 }
9341079
935 static SDValue LowerFORMAL_ARGUMENT(SDValue Op, SelectionDAG &DAG,
936 unsigned ArgNo, unsigned &NumGPRs,
937 unsigned &ArgOffset, DebugLoc dl) {
938 MachineFunction &MF = DAG.getMachineFunction();
939 MVT ObjectVT = Op.getValue(ArgNo).getValueType();
940 SDValue Root = Op.getOperand(0);
941 MachineRegisterInfo &RegInfo = MF.getRegInfo();
942 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
943
944 static const unsigned GPRArgRegs[] = {
945 ARM::R0, ARM::R1, ARM::R2, ARM::R3
946 };
947
948 unsigned ObjSize;
949 unsigned ObjGPRs;
950 unsigned GPRPad;
951 unsigned StackPad;
952 ISD::ArgFlagsTy Flags =
953 cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo + 3))->getArgFlags();
954 HowToPassArgument(ObjectVT, NumGPRs, ArgOffset, ObjGPRs,
955 ObjSize, GPRPad, StackPad, Flags);
956 NumGPRs += GPRPad;
957 ArgOffset += StackPad;
958
959 SDValue ArgValue;
960 if (ObjGPRs == 1) {
961 unsigned VReg;
962 if (AFI->isThumbFunction())
963 VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
964 else
965 VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
966 RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
967 ArgValue = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
968 if (ObjectVT == MVT::f32)
969 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
970 } else if (ObjGPRs == 2) {
971 unsigned VReg;
972 if (AFI->isThumbFunction())
973 VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
974 else
975 VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
976 RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
977 ArgValue = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
978
979 if (AFI->isThumbFunction())
980 VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
981 else
982 VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
983 RegInfo.addLiveIn(GPRArgRegs[NumGPRs+1], VReg);
984 SDValue ArgValue2 = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
985
986 assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
987 ArgValue = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, ArgValue, ArgValue2);
988 }
989 NumGPRs += ObjGPRs;
990
991 if (ObjSize) {
992 MachineFrameInfo *MFI = MF.getFrameInfo();
993 int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
994 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
995 if (ObjGPRs == 0)
996 ArgValue = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0);
997 else {
998 SDValue ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
999 assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
1000 ArgValue = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, ArgValue, ArgValue2);
1001 }
1002
1003 ArgOffset += ObjSize; // Move on to the next argument.
1004 }
1005
1006 return ArgValue;
1007 }
1008
10091080 SDValue
10101081 ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
1011 std::vector<SDValue> ArgValues;
1082 MachineFunction &MF = DAG.getMachineFunction();
1083 MachineFrameInfo *MFI = MF.getFrameInfo();
1084
10121085 SDValue Root = Op.getOperand(0);
10131086 DebugLoc dl = Op.getDebugLoc();
1014 unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
1015 unsigned NumGPRs = 0; // GPRs used for parameter passing.
1016
1017 unsigned NumArgs = Op.getNode()->getNumValues()-1;
1018 for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo)
1019 ArgValues.push_back(LowerFORMAL_ARGUMENT(Op, DAG, ArgNo,
1020 NumGPRs, ArgOffset, dl));
1021
10221087 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
1088 unsigned CC = MF.getFunction()->getCallingConv();
1089 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1090
1091 // Assign locations to all of the incoming arguments.
1092 SmallVector<CCValAssign, 16> ArgLocs;
1093 CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
1094 CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_ARM);
1095
1096 SmallVector<SDValue, 16> ArgValues;
1097
1098 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1099 CCValAssign &VA = ArgLocs[i];
1100
1101 // Arguments stored in registers
1102 if (VA.isRegLoc()) {
1103 MVT RegVT = VA.getLocVT();
1104 TargetRegisterClass *RC;
1105 if (AFI->isThumbFunction())
1106 RC = ARM::tGPRRegisterClass;
1107 else
1108 RC = ARM::GPRRegisterClass;
1109
1110 if (RegVT == MVT::f64) {
1111 // f64 is passed in pairs of GPRs and must be combined
1112 RegVT = MVT::i32;
1113 } else if (!((RegVT == MVT::i32) || (RegVT == MVT::f32)))
1114 assert(0 && "RegVT not supported by FORMAL_ARGUMENTS Lowering");
1115
1116 // Transform the arguments stored on
1117 // physical registers into virtual ones
1118 unsigned Reg = AddLiveIn(MF, VA.getLocReg(), RC);
1119 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
1120
1121 // f64 is passed in i32 pairs and must be combined
1122 if (VA.needsCustom()) {
1123 SDValue ArgValue2;
1124
1125 VA = ArgLocs[++i]; // skip ahead to next loc
1126 if (VA.isMemLoc()) {
1127 // must be APCS and older than V5T to split like this
1128 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
1129 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());
1130
1131 // Create load node to retrieve arguments from the stack
1132 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1133 ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
1134 } else {
1135 Reg = AddLiveIn(MF, VA.getLocReg(), RC);
1136 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
1137 }
1138
1139 ArgValue = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64,
1140 ArgValue, ArgValue2);
1141 }
1142
1143 // If this is an 8 or 16-bit value, it is really passed promoted
1144 // to 32 bits. Insert an assert[sz]ext to capture this, then
1145 // truncate to the right size.
1146 switch (VA.getLocInfo()) {
1147 default: assert(0 && "Unknown loc info!");
1148 case CCValAssign::Full: break;
1149 case CCValAssign::BCvt:
1150 ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
1151 break;
1152 case CCValAssign::SExt:
1153 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
1154 DAG.getValueType(VA.getValVT()));
1155 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1156 break;
1157 case CCValAssign::ZExt:
1158 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
1159 DAG.getValueType(VA.getValVT()));
1160 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
1161 break;
1162 }
1163
1164 ArgValues.push_back(ArgValue);
1165
1166 } else { // VA.isRegLoc()
1167
1168 // sanity check
1169 assert(VA.isMemLoc());
1170 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
1171
1172 unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
1173 int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());
1174
1175 // Create load nodes to retrieve arguments from the stack
1176 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1177 ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0));
1178 }
1179 }
1180
1181 // varargs
10231182 if (isVarArg) {
10241183 static const unsigned GPRArgRegs[] = {
10251184 ARM::R0, ARM::R1, ARM::R2, ARM::R3
10261185 };
10271186
1028 MachineFunction &MF = DAG.getMachineFunction();
1029 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1030 MachineFrameInfo *MFI = MF.getFrameInfo();
1031 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1187 unsigned NumGPRs = CCInfo.getFirstUnallocated(GPRArgRegs,
1188 sizeof(GPRArgRegs)/sizeof(GPRArgRegs[0]));
1189
10321190 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
10331191 unsigned VARegSize = (4 - NumGPRs) * 4;
10341192 unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
1193 unsigned ArgOffset = 0;
10351194 if (VARegSaveSize) {
10361195 // If this function is vararg, store any remaining integer argument regs
10371196 // to their spots on the stack so that they may be loaded by dereferencing
10381197 // the result of va_next.
10391198 AFI->setVarArgsRegSaveSize(VARegSaveSize);
1199 ArgOffset = CCInfo.getNextStackOffset();
10401200 VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
10411201 VARegSaveSize - VARegSize);
10421202 SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
10431203
10441204 SmallVector<SDValue, 4> MemOps;
10451205 for (; NumGPRs < 4; ++NumGPRs) {
1046 unsigned VReg;
1206 TargetRegisterClass *RC;
10471207 if (AFI->isThumbFunction())
1048 VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
1208 RC = ARM::tGPRRegisterClass;
10491209 else
1050 VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
1051 RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
1210 RC = ARM::GPRRegisterClass;
1211
1212 unsigned VReg = AddLiveIn(MF, GPRArgRegs[NumGPRs], RC);
10521213 SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
10531214 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
10541215 MemOps.push_back(Store);
10671228
10681229 // Return the new list of results.
10691230 return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
1070 &ArgValues[0], ArgValues.size());
1231 &ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
10711232 }
10721233
10731234 /// isFloatingPointZero - Return true if this is +0.0.
1717 #include "ARMSubtarget.h"
1818 #include "llvm/Target/TargetLowering.h"
1919 #include "llvm/CodeGen/SelectionDAG.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
2021 #include <vector>
2122
2223 namespace llvm {
146147 ///
147148 unsigned ARMPCLabelIndex;
148149
150 SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
151 const SDValue &StackPtr, const CCValAssign &VA,
152 SDValue Chain,
153 SDValue Arg, ISD::ArgFlagsTy Flags);
154 SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
155 unsigned CallingConv, SelectionDAG &DAG);
149156 SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
157 SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
150158 SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
151159 SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
152160 SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
77 tablegen(ARMGenCodeEmitter.inc -gen-emitter)
88 tablegen(ARMGenAsmWriter.inc -gen-asm-writer)
99 tablegen(ARMGenDAGISel.inc -gen-dag-isel)
10 tablegen(ARMGenCallingConv.inc -gen-callingconv)
1011 tablegen(ARMGenSubtarget.inc -gen-subtarget)
1112
1213 add_llvm_target(ARMCodeGen
1515 ARMGenRegisterInfo.inc ARMGenInstrNames.inc \
1616 ARMGenInstrInfo.inc ARMGenAsmWriter.inc \
1717 ARMGenDAGISel.inc ARMGenSubtarget.inc \
18 ARMGenCodeEmitter.inc
18 ARMGenCodeEmitter.inc ARMGenCallingConv.inc
1919
2020 DIRS = AsmPrinter
2121
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define i32 @f(i32 %a, i128 %b) {
4 %tmp = call i32 @g(i128 %b)
5 ret i32 %tmp
6 }
7
8 declare i32 @g(i128)
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define i64 @f(i32 %a, i128 %b) {
4 %tmp = call i64 @g(i128 %b)
5 ret i64 %tmp
6 }
7
8 declare i64 @g(i128)
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define float @f(i32 %a, i128 %b) {
4 %tmp = call float @g(i128 %b)
5 ret float %tmp
6 }
7
8 declare float @g(i128)
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define double @f(i32 %a, i128 %b) {
4 %tmp = call double @g(i128 %b)
5 ret double %tmp
6 }
7
8 declare double @g(i128)
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define i128 @f(i32 %a, i128 %b) {
4 %tmp = call i128 @g(i128 %b)
5 ret i128 %tmp
6 }
7
8 declare i128 @g(i128)
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define double @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, double %b) {
4 %tmp = call double @g(i32 %a2, i32 %a3, i32 %a4, i32 %a5, double %b)
5 ret double %tmp
6 }
7
8 declare double @g(double)
0 ; RUN: llvm-as < %s | llc -mtriple=arm-linux-gnueabi
1 ; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin
2
3 define i64 @f(i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i64 %b) {
4 %tmp = call i64 @g(i32 %a2, i32 %a3, i32 %a4, i32 %a5, i64 %b)
5 ret i64 %tmp
6 }
7
8 declare i64 @g(i64)
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 declare void @bar(i64 %x, i64 %y)
3
4 define void @foo() {
5 call void @bar(i64 2, i64 3)
6 ret void
7 }
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define float @test_f32(float %a1, float %a2) {
3 ret float %a2
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define float @test_f32_arg5(float %a1, float %a2, float %a3, float %a4, float %a5) {
3 ret float %a5
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define double @test_f64(double %a1, double %a2) {
3 ret double %a2
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mcpu=arm8 -mattr=+vfp2
1
2 define double @test_double_arg_reg_split(i32 %a1, double %a2) {
3 ret double %a2
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define double @test_double_arg_split(i64 %a1, i32 %a2, double %a3) {
3 ret double %a3
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define double @test_double_arg_stack(i64 %a1, i32 %a2, i32 %a3, double %a4) {
3 ret double %a4
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define i128 @test_i128(i128 %a1, i128 %a2, i128 %a3) {
3 ret i128 %a3
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define i64 @test_i64(i64 %a1, i64 %a2) {
3 ret i64 %a2
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define i64 @test_i64_arg3(i64 %a1, i64 %a2, i64 %a3) {
3 ret i64 %a3
4 }
5
0 ; RUN: llvm-as < %s | llc -march=arm -mattr=+vfp2
1
2 define i64 @test_i64_arg_split(i64 %a1, i32 %a2, i64 %a3) {
3 ret i64 %a3
4 }
5
181181 << IndentStr << IndentStr << "LocInfo = CCValAssign::ZExt;\n"
182182 << IndentStr << "else\n"
183183 << IndentStr << IndentStr << "LocInfo = CCValAssign::AExt;\n";
184 } else if (Action->isSubClassOf("CCBitConvertToType")) {
185 Record *DestTy = Action->getValueAsDef("DestTy");
186 O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
187 O << IndentStr << "LocInfo = CCValAssign::BCvt;\n";
184188 } else if (Action->isSubClassOf("CCPassByVal")) {
185189 int Size = Action->getValueAsInt("Size");
186190 int Align = Action->getValueAsInt("Align");
188192 << "State.HandleByVal(ValNo, ValVT, LocVT, LocInfo, "
189193 << Size << ", " << Align << ", ArgFlags);\n";
190194 O << IndentStr << "return false;\n";
195 } else if (Action->isSubClassOf("CCCustom")) {
196 O << IndentStr
197 << "if (" << Action->getValueAsString("FuncName") << "(ValNo, ValVT, "
198 << "LocVT, LocInfo, ArgFlags, State))\n";
199 O << IndentStr << IndentStr << "return false;\n";
191200 } else {
192201 Action->dump();
193202 throw "Unknown CCAction!";
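To make the new CCCustom branch concrete: for CCIfType<[f64], CCCustom<"CC_ARM_APCS_Custom_f64">> it would emit code along these lines (shape inferred from the emitter logic above, not copied from the generated file):

if (LocVT == MVT::f64) {
  if (CC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
    return false;   // the custom handler assigned the locations itself
}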