llvm.org GIT mirror — llvm / commit f1daf7d
Use common code for both ARM and Thumb-2 instruction and register info.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@75067 91177308-0d34-0410-b5e6-96231b3b80d8
David Goodwin, 11 years ago
15 changed file(s) with 107 addition(s) and 1128 deletion(s).
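Reviewer note: the hunks below move the Thumb-2 target hooks onto the shared ARM base classes (Thumb2RegisterInfo now constructs ARMBaseRegisterInfo, and Thumb2InstrInfo::copyRegToReg defers to ARMBaseInstrInfo::copyRegToReg). A minimal sketch of the resulting layout — only the ARMBase*/Thumb2* names appear in the patch, the bodies here are illustrative:

    // Sketch only (members illustrative): common logic lives in the ARMBase*
    // classes; the Thumb-2 subclasses override only what still needs
    // Thumb-specific opcodes such as tMOVr, tSpill and tRestore.
    struct ARMBaseInstrInfo {
      virtual ~ARMBaseInstrInfo() {}
      virtual bool copyRegToReg() const { return true; }   // shared GPR->GPR copy
    };
    struct Thumb2InstrInfo : ARMBaseInstrInfo {
      virtual bool copyRegToReg() const {
        // emit tMOVlor2hir / tMOVhir2lor for high<->low copies,
        // otherwise defer to the common implementation:
        return ARMBaseInstrInfo::copyRegToReg();
      }
    };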
214214 --Pos;
215215 }
216216
217 TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
218 CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
217 bool Emitted = TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
218 assert(Emitted && "Unable to issue a live-in copy instruction!\n");
219 (void) Emitted;
220
221 CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
219222 if (Coalesced) {
220223 if (&*InsertPos == UseMI) ++InsertPos;
221224 MBB->erase(UseMI);
246249 E = MRI.livein_end(); LI != E; ++LI)
247250 if (LI->second) {
248251 const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
249 TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
250 LI->second, LI->first, RC, RC);
252 bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
253 LI->second, LI->first, RC, RC);
254 assert(Emitted && "Unable to issue a live-in copy instruction!\n");
255 (void) Emitted;
251256 }
252257 }
253258 }
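The two hunks above now check the success flag returned by copyRegToReg. A self-contained sketch of that assert-and-silence pattern (tryEmitCopy is a made-up stand-in for the TII call):

    #include <cassert>

    bool tryEmitCopy();  // hypothetical stand-in for TII.copyRegToReg(...)

    void emitLiveInCopy() {
      bool Emitted = tryEmitCopy();
      assert(Emitted && "Unable to issue a live-in copy instruction!");
      (void) Emitted;  // no unused-variable warning when NDEBUG removes the assert
    }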
226226 0
227227 };
228228
229 if (STI.isThumb()) {
229 if (STI.isThumb1Only()) {
230230 return STI.isTargetDarwin()
231231 ? DarwinThumbCalleeSavedRegClasses : ThumbCalleeSavedRegClasses;
232232 }
564564 }
565565
566566 bool ForceLRSpill = false;
567 if (!LRSpilled && AFI->isThumbFunction()) {
567 if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
568568 unsigned FnSize = TII.GetFunctionSizeInBytes(MF);
569569 // Force LR to be spilled if the Thumb function size is > 2048. This enables
570570 // use of BL to implement far jump. If it turns out that it's not needed
606606 if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
607607 for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
608608 unsigned Reg = UnspilledCS1GPRs[i];
609 // Don't spiil high register if the function is thumb
610 if (!AFI->isThumbFunction() ||
609 // Don't spill high register if the function is thumb1
610 if (!AFI->isThumb1OnlyFunction() ||
611611 isARMLowRegister(Reg) || Reg == ARM::LR) {
612612 MF.getRegInfo().setPhysRegUsed(Reg);
613613 AFI->setCSRegisterIsSpilled(Reg);
617617 }
618618 }
619619 } else if (!UnspilledCS2GPRs.empty() &&
620 !AFI->isThumbFunction()) {
620 !AFI->isThumb1OnlyFunction()) {
621621 unsigned Reg = UnspilledCS2GPRs.front();
622622 MF.getRegInfo().setPhysRegUsed(Reg);
623623 AFI->setCSRegisterIsSpilled(Reg);
630630 // to materialize a stack offset. If so, either spill one additional
631631 // callee-saved register or reserve a special spill slot to facilitate
632632 // register scavenging.
633 if (RS && !ExtraCSSpill && !AFI->isThumbFunction()) {
633 if (RS && !ExtraCSSpill && !AFI->isThumb1OnlyFunction()) {
634634 MachineFrameInfo *MFI = MF.getFrameInfo();
635635 unsigned Size = estimateStackSize(MF, MFI);
636636 unsigned Limit = (1 << 12) - 1;
729729 return ARM::R0;
730730 case ARM::R3:
731731 // FIXME!
732 return STI.isThumb() ? 0 : ARM::R2;
732 return STI.isThumb1Only() ? 0 : ARM::R2;
733733 case ARM::R5:
734734 return ARM::R4;
735735 case ARM::R7:
803803 return ARM::R1;
804804 case ARM::R2:
805805 // FIXME!
806 return STI.isThumb() ? 0 : ARM::R3;
806 return STI.isThumb1Only() ? 0 : ARM::R3;
807807 case ARM::R4:
808808 return ARM::R5;
809809 case ARM::R6:
10021002 unsigned findScratchRegister(RegScavenger *RS, const TargetRegisterClass *RC,
10031003 ARMFunctionInfo *AFI) {
10041004 unsigned Reg = RS ? RS->FindUnusedReg(RC, true) : (unsigned) ARM::R12;
1005 assert (!AFI->isThumbFunction());
1005 assert (!AFI->isThumb1OnlyFunction());
10061006 if (Reg == 0)
10071007 // Try a already spilled CS register.
10081008 Reg = RS->FindUnusedReg(RC, AFI->getSpilledCSRegisters());
123123 const TargetInstrInfo *TII;
124124 ARMFunctionInfo *AFI;
125125 bool isThumb;
126 bool isThumb1Only;
126127 bool isThumb2;
127128 public:
128129 static char ID;
213214 TII = Fn.getTarget().getInstrInfo();
214215 AFI = Fn.getInfo<ARMFunctionInfo>();
215216 isThumb = AFI->isThumbFunction();
217 isThumb1Only = AFI->isThumb1OnlyFunction();
216218 isThumb2 = AFI->isThumb2Function();
217219
218220 HasFarJump = false;
869869 TLI.getPointerTy());
870870
871871 SDNode *ResNode;
872 if (Subtarget->isThumb())
872 if (Subtarget->isThumb1Only())
873873 ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
874874 CPIdx, CurDAG->getEntryNode());
875875 else {
895895 // Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
896896 int FI = cast<FrameIndexSDNode>(N)->getIndex();
897897 SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
898 if (Subtarget->isThumb()) {
898 if (Subtarget->isThumb1Only()) {
899899 return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
900900 CurDAG->getTargetConstant(0, MVT::i32));
901901 } else {
902902 SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
903903 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
904904 CurDAG->getRegister(0, MVT::i32) };
905 return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, Ops, 5);
905 return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ? ARM::t2ADDri : ARM::ADDri,
906 MVT::i32, Ops, 5);
906907 }
907908 }
908909 case ISD::ADD: {
909 if (!Subtarget->isThumb())
910 if (!Subtarget->isThumb1Only())
910911 break;
911912 // Select add sp, c to tADDhirr.
912913 SDValue N0 = Op.getOperand(0);
937938 CurDAG->getTargetConstant(ShImm, MVT::i32),
938939 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
939940 CurDAG->getRegister(0, MVT::i32) };
940 return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
941 return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ?
942 ARM::t2ADDrs : ARM::ADDrs, MVT::i32, Ops, 7);
941943 }
942944 if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
943945 SDValue V = Op.getOperand(0);
946948 CurDAG->getTargetConstant(ShImm, MVT::i32),
947949 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
948950 CurDAG->getRegister(0, MVT::i32) };
949 return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
951 return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ?
952 ARM::t2RSBrs : ARM::RSBrs, MVT::i32, Ops, 7);
950953 }
951954 }
952955 break;
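Throughout this file the same DAG nodes now select to either the ARM or the Thumb-2 encoding, keyed off the subtarget. A minimal sketch of the dispatch pattern used in the hunks above (opcode names are the ones in the diff):

    // Pick the 32-bit Thumb-2 form when available, otherwise the ARM form.
    unsigned Opc = Subtarget->hasThumb2() ? ARM::t2ADDri : ARM::ADDri;
    return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);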
188188 setLibcallName(RTLIB::SRL_I128, 0);
189189 setLibcallName(RTLIB::SRA_I128, 0);
190190
191 if (Subtarget->isThumb())
191 if (Subtarget->isThumb1Only())
192192 addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
193193 else
194194 addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
195 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
195 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
196196 addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
197197 addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
198198
255255 } else {
256256 setOperationAction(ISD::MUL, MVT::i64, Expand);
257257 setOperationAction(ISD::MULHU, MVT::i32, Expand);
258 if (!Subtarget->isThumb() && !Subtarget->hasV6Ops())
258 if (!Subtarget->isThumb1Only() && !Subtarget->hasV6Ops())
259259 setOperationAction(ISD::MULHS, MVT::i32, Expand);
260260 }
261261 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
309309 }
310310 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
311311
312 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb())
312 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
313313 // Turn f64->i64 into FMRRD, i64 -> f64 to FMDRR iff target supports vfp2.
314314 setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
315315
339339 setOperationAction(ISD::FCOS, MVT::f64, Expand);
340340 setOperationAction(ISD::FREM, MVT::f64, Expand);
341341 setOperationAction(ISD::FREM, MVT::f32, Expand);
342 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
342 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
343343 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
344344 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
345345 }
347347 setOperationAction(ISD::FPOW, MVT::f32, Expand);
348348
349349 // int <-> fp are custom expanded into bit_convert + ARMISD ops.
350 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
350 if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
351351 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
352352 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
353353 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
941941 // ARM call to a local ARM function is predicable.
942942 isLocalARMFunc = !Subtarget->isThumb() && !isExt;
943943 // tBX takes a register source operand.
944 if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) {
944 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
945945 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
946946 ARMCP::CPStub, 4);
947947 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
960960 isARMFunc = !Subtarget->isThumb() || isStub;
961961 // tBX takes a register source operand.
962962 const char *Sym = S->getSymbol();
963 if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) {
963 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
964964 ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex,
965965 ARMCP::CPStub, 4);
966966 SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
976976
977977 // FIXME: handle tail calls differently.
978978 unsigned CallOpc;
979 if (Subtarget->isThumb()) {
979 if (Subtarget->isThumb1Only()) {
980980 if (!Subtarget->hasV5TOps() && (!isDirect || isARMFunc))
981981 CallOpc = ARMISD::CALL_NOLINK;
982982 else
986986 ? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
987987 : ARMISD::CALL_NOLINK;
988988 }
989 if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb()) {
989 if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
990990 // implicit def LR - LR mustn't be allocated as GRP:$dst of CALL_NOLINK
991991 Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
992992 InFlag = Chain.getValue(1);
13441344 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
13451345
13461346 TargetRegisterClass *RC;
1347 if (AFI->isThumbFunction())
1347 if (AFI->isThumb1OnlyFunction())
13481348 RC = ARM::tGPRRegisterClass;
13491349 else
13501350 RC = ARM::GPRRegisterClass;
14221422 RC = ARM::SPRRegisterClass;
14231423 else if (FloatABIType == FloatABI::Hard && RegVT == MVT::f64)
14241424 RC = ARM::DPRRegisterClass;
1425 else if (AFI->isThumbFunction())
1425 else if (AFI->isThumb1OnlyFunction())
14261426 RC = ARM::tGPRRegisterClass;
14271427 else
14281428 RC = ARM::GPRRegisterClass;
15001500 SmallVector<SDValue, 4> MemOps;
15011501 for (; NumGPRs < 4; ++NumGPRs) {
15021502 TargetRegisterClass *RC;
1503 if (AFI->isThumbFunction())
1503 if (AFI->isThumb1OnlyFunction())
15041504 RC = ARM::tGPRRegisterClass;
15051505 else
15061506 RC = ARM::GPRRegisterClass;
15431543 return false;
15441544 }
15451545
1546 static bool isLegalCmpImmediate(unsigned C, bool isThumb) {
1547 return ( isThumb && (C & ~255U) == 0) ||
1548 (!isThumb && ARM_AM::getSOImmVal(C) != -1);
1546 static bool isLegalCmpImmediate(unsigned C, bool isThumb1Only) {
1547 return ( isThumb1Only && (C & ~255U) == 0) ||
1548 (!isThumb1Only && ARM_AM::getSOImmVal(C) != -1);
15491549 }
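For context on the adjustment logic that follows: a Thumb-1 CMP immediate must fit in 8 bits (0-255), while ARM mode accepts any value that ARM_AM::getSOImmVal can encode (an 8-bit value rotated right by an even amount). When the constant does not fit, the switch below nudges it by one and flips the condition. A small worked example, assuming an unsigned 32-bit compare:

    // "x < 256" needs cmp r0, #256, which Thumb-1 cannot encode (256 > 255);
    // rewriting it as "x <= 255" keeps the same meaning and fits:
    //   SETULT with C = 256   -->   SETULE with C-1 = 255
    // The SETULE/SETUGT cases go the other way, using C+1 instead.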
15501550
15511551 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
15521552 /// the given operands.
15531553 static SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
1554 SDValue &ARMCC, SelectionDAG &DAG, bool isThumb,
1554 SDValue &ARMCC, SelectionDAG &DAG, bool isThumb1Only,
15551555 DebugLoc dl) {
15561556 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
15571557 unsigned C = RHSC->getZExtValue();
1558 if (!isLegalCmpImmediate(C, isThumb)) {
1558 if (!isLegalCmpImmediate(C, isThumb1Only)) {
15591559 // Constant does not fit, try adjusting it by one?
15601560 switch (CC) {
15611561 default: break;
15621562 case ISD::SETLT:
15631563 case ISD::SETGE:
1564 if (isLegalCmpImmediate(C-1, isThumb)) {
1564 if (isLegalCmpImmediate(C-1, isThumb1Only)) {
15651565 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
15661566 RHS = DAG.getConstant(C-1, MVT::i32);
15671567 }
15681568 break;
15691569 case ISD::SETULT:
15701570 case ISD::SETUGE:
1571 if (C > 0 && isLegalCmpImmediate(C-1, isThumb)) {
1571 if (C > 0 && isLegalCmpImmediate(C-1, isThumb1Only)) {
15721572 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
15731573 RHS = DAG.getConstant(C-1, MVT::i32);
15741574 }
15751575 break;
15761576 case ISD::SETLE:
15771577 case ISD::SETGT:
1578 if (isLegalCmpImmediate(C+1, isThumb)) {
1578 if (isLegalCmpImmediate(C+1, isThumb1Only)) {
15791579 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
15801580 RHS = DAG.getConstant(C+1, MVT::i32);
15811581 }
15821582 break;
15831583 case ISD::SETULE:
15841584 case ISD::SETUGT:
1585 if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb)) {
1585 if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb1Only)) {
15861586 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
15871587 RHS = DAG.getConstant(C+1, MVT::i32);
15881588 }
16311631 if (LHS.getValueType() == MVT::i32) {
16321632 SDValue ARMCC;
16331633 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1634 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb(), dl);
1634 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
16351635 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
16361636 }
16371637
16661666 if (LHS.getValueType() == MVT::i32) {
16671667 SDValue ARMCC;
16681668 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
1669 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb(), dl);
1669 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
16701670 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
16711671 Chain, Dest, ARMCC, CCR,Cmp);
16721672 }
19691969 return SDValue();
19701970
19711971 // If we are in thumb mode, we don't have RRX.
1972 if (ST->isThumb()) return SDValue();
1972 if (ST->isThumb1Only()) return SDValue();
19731973
19741974 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
19751975 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
28092809 if (!VT.isSimple())
28102810 return false;
28112811
2812 if (Subtarget->isThumb()) {
2812 if (Subtarget->isThumb()) { // FIXME for thumb2
28132813 if (V < 0)
28142814 return false;
28152815
28752875 case 0: // no scale reg, must be "r+i" or "r", or "i".
28762876 break;
28772877 case 1:
2878 if (Subtarget->isThumb())
2878 if (Subtarget->isThumb()) // FIXME for thumb2
28792879 return false;
28802880 // FALL THROUGH.
28812881 default:
31293129 // GCC RS6000 Constraint Letters
31303130 switch (Constraint[0]) {
31313131 case 'l':
3132 if (Subtarget->isThumb())
3132 if (Subtarget->isThumb1Only())
31333133 return std::make_pair(0U, ARM::tGPRRegisterClass);
31343134 else
31353135 return std::make_pair(0U, ARM::GPRRegisterClass);
32103210
32113211 switch (Constraint) {
32123212 case 'I':
3213 if (Subtarget->isThumb()) {
3214 // This must be a constant between 0 and 255, for ADD immediates.
3213 if (Subtarget->isThumb1Only()) {
3214 // This must be a constant between 0 and 255, for ADD
3215 // immediates.
32153216 if (CVal >= 0 && CVal <= 255)
3217 break;
3218 } else if (Subtarget->isThumb2()) {
3219 // A constant that can be used as an immediate value in a
3220 // data-processing instruction.
3221 if (ARM_AM::getT2SOImmVal(CVal) != -1)
32163222 break;
32173223 } else {
32183224 // A constant that can be used as an immediate value in a
32233229 return;
32243230
32253231 case 'J':
3226 if (Subtarget->isThumb()) {
3232 if (Subtarget->isThumb()) { // FIXME thumb2
32273233 // This must be a constant between -255 and -1, for negated ADD
32283234 // immediates. This can be used in GCC with an "n" modifier that
32293235 // prints the negated value, for use with SUB instructions. It is
32403246 return;
32413247
32423248 case 'K':
3243 if (Subtarget->isThumb()) {
3249 if (Subtarget->isThumb1Only()) {
32443250 // A 32-bit value where only one byte has a nonzero value. Exclude
32453251 // zero to match GCC. This constraint is used by GCC internally for
32463252 // constants that can be loaded with a move/shift combination.
32473253 // It is not useful otherwise but is implemented for compatibility.
32483254 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
3255 break;
3256 } else if (Subtarget->isThumb2()) {
3257 // A constant whose bitwise inverse can be used as an immediate
3258 // value in a data-processing instruction. This can be used in GCC
3259 // with a "B" modifier that prints the inverted value, for use with
3260 // BIC and MVN instructions. It is not useful otherwise but is
3261 // implemented for compatibility.
3262 if (ARM_AM::getT2SOImmVal(~CVal) != -1)
32493263 break;
32503264 } else {
32513265 // A constant whose bitwise inverse can be used as an immediate
32593273 return;
32603274
32613275 case 'L':
3262 if (Subtarget->isThumb()) {
3276 if (Subtarget->isThumb1Only()) {
32633277 // This must be a constant between -7 and 7,
32643278 // for 3-operand ADD/SUB immediate instructions.
32653279 if (CVal >= -7 && CVal < 7)
3280 break;
3281 } else if (Subtarget->isThumb2()) {
3282 // A constant whose negation can be used as an immediate value in a
3283 // data-processing instruction. This can be used in GCC with an "n"
3284 // modifier that prints the negated value, for use with SUB
3285 // instructions. It is not useful otherwise but is implemented for
3286 // compatibility.
3287 if (ARM_AM::getT2SOImmVal(-CVal) != -1)
32663288 break;
32673289 } else {
32683290 // A constant whose negation can be used as an immediate value in a
32763298 return;
32773299
32783300 case 'M':
3279 if (Subtarget->isThumb()) {
3301 if (Subtarget->isThumb()) { // FIXME thumb2
32803302 // This must be a multiple of 4 between 0 and 1020, for
32813303 // ADD sp + immediate.
32823304 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
32913313 return;
32923314
32933315 case 'N':
3294 if (Subtarget->isThumb()) {
3316 if (Subtarget->isThumb()) { // FIXME thumb2
32953317 // This must be a constant between 0 and 31, for shift amounts.
32963318 if (CVal >= 0 && CVal <= 31)
32973319 break;
32993321 return;
33003322
33013323 case 'O':
3302 if (Subtarget->isThumb()) {
3324 if (Subtarget->isThumb()) { // FIXME thumb2
33033325 // This must be a multiple of 4 between -508 and 508, for
33043326 // ADD/SUB sp = sp + immediate.
33053327 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
118118 def tADJCALLSTACKUP :
119119 PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
120120 "@ tADJCALLSTACKUP $amt1",
121 [(ARMcallseq_end imm:$amt1, imm:$amt2)]>, Requires<[IsThumb]>;
121 [(ARMcallseq_end imm:$amt1, imm:$amt2)]>, Requires<[IsThumb1Only]>;
122122
123123 def tADJCALLSTACKDOWN :
124124 PseudoInst<(outs), (ins i32imm:$amt),
125125 "@ tADJCALLSTACKDOWN $amt",
126 [(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb]>;
126 [(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb1Only]>;
127127 }
128128
129129 let isNotDuplicable = 1 in
154154 //
155155
156156 let isReturn = 1, isTerminator = 1 in {
157 def tBX_RET : TI<(outs), (ins), "bx lr", [(ARMretflag)]>;
157 def tBX_RET : T1I<(outs), (ins), "bx lr", [(ARMretflag)]>;
158158 // Alternative return instruction used by vararg functions.
159159 def tBX_RET_vararg : T1I<(outs), (ins tGPR:$target), "bx $target", []>;
160160 }
470470 def tREV : T1I<(outs tGPR:$dst), (ins tGPR:$src),
471471 "rev $dst, $src",
472472 [(set tGPR:$dst, (bswap tGPR:$src))]>,
473 Requires<[IsThumb, HasV6]>;
473 Requires<[IsThumb1Only, HasV6]>;
474474
475475 def tREV16 : T1I<(outs tGPR:$dst), (ins tGPR:$src),
476476 "rev16 $dst, $src",
479479 (or (and (shl tGPR:$src, (i32 8)), 0xFF00),
480480 (or (and (srl tGPR:$src, (i32 8)), 0xFF0000),
481481 (and (shl tGPR:$src, (i32 8)), 0xFF000000)))))]>,
482 Requires<[IsThumb, HasV6]>;
482 Requires<[IsThumb1Only, HasV6]>;
483483
484484 def tREVSH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
485485 "revsh $dst, $src",
487487 (sext_inreg
488488 (or (srl (and tGPR:$src, 0xFFFF), (i32 8)),
489489 (shl tGPR:$src, (i32 8))), i16))]>,
490 Requires<[IsThumb, HasV6]>;
490 Requires<[IsThumb1Only, HasV6]>;
491491
492492 // rotate right register
493493 let Defs = [CPSR] in
539539 def tSXTB : T1I<(outs tGPR:$dst), (ins tGPR:$src),
540540 "sxtb $dst, $src",
541541 [(set tGPR:$dst, (sext_inreg tGPR:$src, i8))]>,
542 Requires<[IsThumb, HasV6]>;
542 Requires<[IsThumb1Only, HasV6]>;
543543
544544 // sign-extend short
545545 def tSXTH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
546546 "sxth $dst, $src",
547547 [(set tGPR:$dst, (sext_inreg tGPR:$src, i16))]>,
548 Requires<[IsThumb, HasV6]>;
548 Requires<[IsThumb1Only, HasV6]>;
549549
550550 // test
551551 let isCommutable = 1, Defs = [CPSR] in
557557 def tUXTB : T1I<(outs tGPR:$dst), (ins tGPR:$src),
558558 "uxtb $dst, $src",
559559 [(set tGPR:$dst, (and tGPR:$src, 0xFF))]>,
560 Requires<[IsThumb, HasV6]>;
560 Requires<[IsThumb1Only, HasV6]>;
561561
562562 // zero-extend short
563563 def tUXTH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
564564 "uxth $dst, $src",
565565 [(set tGPR:$dst, (and tGPR:$src, 0xFFFF))]>,
566 Requires<[IsThumb, HasV6]>;
566 Requires<[IsThumb1Only, HasV6]>;
567567
568568
569569 // Conditional move tMOVCCr - Used to implement the Thumb SELECT_CC DAG operation.
431431 //
432432
433433 let isNotDuplicable = 1 in
434 def t2PICADD : T2XI<(outs tGPR:$dst), (ins tGPR:$lhs, pclabel:$cp),
434 def t2PICADD : T2XI<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp),
435435 "$cp:\n\tadd $dst, pc",
436 [(set tGPR:$dst, (ARMpic_add tGPR:$lhs, imm:$cp))]>;
436 [(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>;
437437
438438
439439 // LEApcrel - Load a pc-relative address into a register without offending the
118118 JumpTableUId(0), ConstPoolEntryUId(0) {}
119119
120120 bool isThumbFunction() const { return isThumb; }
121 bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
121122 bool isThumb2Function() const { return isThumb && hasThumb2; }
122123
123124 unsigned getAlign() const { return Align; }
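The new predicate splits the existing isThumbFunction() set in two; restated from the hunk above (isThumb and hasThumb2 are the existing ARMFunctionInfo members):

    // isThumb   : the function is compiled in Thumb mode at all
    // hasThumb2 : the subtarget also has the 32-bit Thumb-2 encodings
    bool isThumbFunction()      const { return isThumb; }
    bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
    bool isThumb2Function()     const { return isThumb &&  hasThumb2; }
    // hence isThumbFunction() == isThumb1OnlyFunction() || isThumb2Function()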
223223 } else if (inTextSection) {
224224 // An instruction
225225 atInsnStart = false;
226 if (Subtarget->isThumb()) {
226 if (Subtarget->isThumb()) { // FIXME thumb2
227227 // BL and BLX are 4 bytes, all others 2.
228228 if (strncmp(Str, "blx", strlen("blx"))==0) {
229229 const char* p = Str+3;
7070
7171 // FIXME
7272 switch (MBB.back().getOpcode()) {
73 //case ARM::t2BX_RET:
73 case ARM::t2BX_RET:
7474 // case ARM::LDM_RET:
7575 case ARM::t2B: // Uncond branch.
7676 case ARM::t2BR_JTr: // Jumptable branch.
8989
9090 return false;
9191 }
92
93
94 bool Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
95 MachineBasicBlock::iterator I,
96 unsigned DestReg, unsigned SrcReg,
97 const TargetRegisterClass *DestRC,
98 const TargetRegisterClass *SrcRC) const {
99 DebugLoc DL = DebugLoc::getUnknownLoc();
100 if (I != MBB.end()) DL = I->getDebugLoc();
101
102 if (DestRC == ARM::GPRRegisterClass) {
103 if (SrcRC == ARM::GPRRegisterClass) {
104 return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC, SrcRC);
105 } else if (SrcRC == ARM::tGPRRegisterClass) {
106 BuildMI(MBB, I, DL, get(ARM::tMOVlor2hir), DestReg).addReg(SrcReg);
107 return true;
108 }
109 } else if (DestRC == ARM::tGPRRegisterClass) {
110 if (SrcRC == ARM::GPRRegisterClass) {
111 BuildMI(MBB, I, DL, get(ARM::tMOVhir2lor), DestReg).addReg(SrcReg);
112 return true;
113 } else if (SrcRC == ARM::tGPRRegisterClass) {
114 BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
115 return true;
116 }
117 }
118
119 return false;
120 }
121
122
123
124
125
126
127 bool Thumb2InstrInfo::isMoveInstr(const MachineInstr &MI,
128 unsigned &SrcReg, unsigned &DstReg,
129 unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
130 SrcSubIdx = DstSubIdx = 0; // No sub-registers.
131
132 unsigned oc = MI.getOpcode();
133 switch (oc) {
134 default:
135 return false;
136 case ARM::tMOVr:
137 case ARM::tMOVhir2lor:
138 case ARM::tMOVlor2hir:
139 case ARM::tMOVhir2hir:
140 assert(MI.getDesc().getNumOperands() >= 2 &&
141 MI.getOperand(0).isReg() &&
142 MI.getOperand(1).isReg() &&
143 "Invalid Thumb MOV instruction");
144 SrcReg = MI.getOperand(1).getReg();
145 DstReg = MI.getOperand(0).getReg();
146 return true;
147 }
148 }
149
150 unsigned Thumb2InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
151 int &FrameIndex) const {
152 switch (MI->getOpcode()) {
153 default: break;
154 case ARM::tRestore:
155 if (MI->getOperand(1).isFI() &&
156 MI->getOperand(2).isImm() &&
157 MI->getOperand(2).getImm() == 0) {
158 FrameIndex = MI->getOperand(1).getIndex();
159 return MI->getOperand(0).getReg();
160 }
161 break;
162 }
163 return 0;
164 }
165
166 unsigned Thumb2InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
167 int &FrameIndex) const {
168 switch (MI->getOpcode()) {
169 default: break;
170 case ARM::tSpill:
171 if (MI->getOperand(1).isFI() &&
172 MI->getOperand(2).isImm() &&
173 MI->getOperand(2).getImm() == 0) {
174 FrameIndex = MI->getOperand(1).getIndex();
175 return MI->getOperand(0).getReg();
176 }
177 break;
178 }
179 return 0;
180 }
181
182 bool Thumb2InstrInfo::
183 canFoldMemoryOperand(const MachineInstr *MI,
184 const SmallVectorImpl<unsigned> &Ops) const {
185 if (Ops.size() != 1) return false;
186
187 unsigned OpNum = Ops[0];
188 unsigned Opc = MI->getOpcode();
189 switch (Opc) {
190 default: break;
191 case ARM::tMOVr:
192 case ARM::tMOVlor2hir:
193 case ARM::tMOVhir2lor:
194 case ARM::tMOVhir2hir: {
195 if (OpNum == 0) { // move -> store
196 unsigned SrcReg = MI->getOperand(1).getReg();
197 if (RI.isPhysicalRegister(SrcReg) && !isARMLowRegister(SrcReg))
198 // tSpill cannot take a high register operand.
199 return false;
200 } else { // move -> load
201 unsigned DstReg = MI->getOperand(0).getReg();
202 if (RI.isPhysicalRegister(DstReg) && !isARMLowRegister(DstReg))
203 // tRestore cannot target a high register operand.
204 return false;
205 }
206 return true;
207 }
208 }
209
210 return false;
211 }
212
213 void Thumb2InstrInfo::
214 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
215 unsigned SrcReg, bool isKill, int FI,
216 const TargetRegisterClass *RC) const {
217 DebugLoc DL = DebugLoc::getUnknownLoc();
218 if (I != MBB.end()) DL = I->getDebugLoc();
219
220 assert(RC == ARM::tGPRRegisterClass && "Unknown regclass!");
221
222 if (RC == ARM::tGPRRegisterClass) {
223 BuildMI(MBB, I, DL, get(ARM::tSpill))
224 .addReg(SrcReg, getKillRegState(isKill))
225 .addFrameIndex(FI).addImm(0);
226 }
227 }
228
229 void Thumb2InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
230 bool isKill,
231 SmallVectorImpl<MachineOperand> &Addr,
232 const TargetRegisterClass *RC,
233 SmallVectorImpl<MachineInstr*> &NewMIs) const{
234 DebugLoc DL = DebugLoc::getUnknownLoc();
235 unsigned Opc = 0;
236
237 assert(RC == ARM::GPRRegisterClass && "Unknown regclass!");
238 if (RC == ARM::GPRRegisterClass) {
239 Opc = Addr[0].isFI() ? ARM::tSpill : ARM::tSTR;
240 }
241
242 MachineInstrBuilder MIB =
243 BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
244 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
245 MIB.addOperand(Addr[i]);
246 NewMIs.push_back(MIB);
247 return;
248 }
249
250 void Thumb2InstrInfo::
251 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
252 unsigned DestReg, int FI,
253 const TargetRegisterClass *RC) const {
254 DebugLoc DL = DebugLoc::getUnknownLoc();
255 if (I != MBB.end()) DL = I->getDebugLoc();
256
257 assert(RC == ARM::tGPRRegisterClass && "Unknown regclass!");
258
259 if (RC == ARM::tGPRRegisterClass) {
260 BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
261 .addFrameIndex(FI).addImm(0);
262 }
263 }
264
265 void Thumb2InstrInfo::
266 loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
267 SmallVectorImpl<MachineOperand> &Addr,
268 const TargetRegisterClass *RC,
269 SmallVectorImpl<MachineInstr*> &NewMIs) const {
270 DebugLoc DL = DebugLoc::getUnknownLoc();
271 unsigned Opc = 0;
272
273 if (RC == ARM::GPRRegisterClass) {
274 Opc = Addr[0].isFI() ? ARM::tRestore : ARM::tLDR;
275 }
276
277 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
278 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
279 MIB.addOperand(Addr[i]);
280 NewMIs.push_back(MIB);
281 return;
282 }
283
284 bool Thumb2InstrInfo::
285 spillCalleeSavedRegisters(MachineBasicBlock &MBB,
286 MachineBasicBlock::iterator MI,
287 const std::vector<CalleeSavedInfo> &CSI) const {
288 if (CSI.empty())
289 return false;
290
291 DebugLoc DL = DebugLoc::getUnknownLoc();
292 if (MI != MBB.end()) DL = MI->getDebugLoc();
293
294 MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, get(ARM::tPUSH));
295 for (unsigned i = CSI.size(); i != 0; --i) {
296 unsigned Reg = CSI[i-1].getReg();
297 // Add the callee-saved register as live-in. It's killed at the spill.
298 MBB.addLiveIn(Reg);
299 MIB.addReg(Reg, RegState::Kill);
300 }
301 return true;
302 }
303
304 bool Thumb2InstrInfo::
305 restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
306 MachineBasicBlock::iterator MI,
307 const std::vector<CalleeSavedInfo> &CSI) const {
308 MachineFunction &MF = *MBB.getParent();
309 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
310 if (CSI.empty())
311 return false;
312
313 bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
314 MachineInstr *PopMI = MF.CreateMachineInstr(get(ARM::tPOP),MI->getDebugLoc());
315 for (unsigned i = CSI.size(); i != 0; --i) {
316 unsigned Reg = CSI[i-1].getReg();
317 if (Reg == ARM::LR) {
318 // Special epilogue for vararg functions. See emitEpilogue
319 if (isVarArg)
320 continue;
321 Reg = ARM::PC;
322 PopMI->setDesc(get(ARM::tPOP_RET));
323 MI = MBB.erase(MI);
324 }
325 PopMI->addOperand(MachineOperand::CreateReg(Reg, true));
326 }
327
328 // It's illegal to emit pop instruction without operands.
329 if (PopMI->getNumOperands() > 0)
330 MBB.insert(MI, PopMI);
331
332 return true;
333 }
334
335 MachineInstr *Thumb2InstrInfo::
336 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
337 const SmallVectorImpl<unsigned> &Ops, int FI) const {
338 if (Ops.size() != 1) return NULL;
339
340 unsigned OpNum = Ops[0];
341 unsigned Opc = MI->getOpcode();
342 MachineInstr *NewMI = NULL;
343 switch (Opc) {
344 default: break;
345 case ARM::tMOVr:
346 case ARM::tMOVlor2hir:
347 case ARM::tMOVhir2lor:
348 case ARM::tMOVhir2hir: {
349 if (OpNum == 0) { // move -> store
350 unsigned SrcReg = MI->getOperand(1).getReg();
351 bool isKill = MI->getOperand(1).isKill();
352 if (RI.isPhysicalRegister(SrcReg) && !isARMLowRegister(SrcReg))
353 // tSpill cannot take a high register operand.
354 break;
355 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
356 .addReg(SrcReg, getKillRegState(isKill))
357 .addFrameIndex(FI).addImm(0);
358 } else { // move -> load
359 unsigned DstReg = MI->getOperand(0).getReg();
360 if (RI.isPhysicalRegister(DstReg) && !isARMLowRegister(DstReg))
361 // tRestore cannot target a high register operand.
362 break;
363 bool isDead = MI->getOperand(0).isDead();
364 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
365 .addReg(DstReg, RegState::Define | getDeadRegState(isDead))
366 .addFrameIndex(FI).addImm(0);
367 }
368 break;
369 }
370 }
371
372 return NewMI;
373 }
4141 /// always be able to get register info as well (through this method).
4242 ///
4343 const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
44
45 bool copyRegToReg(MachineBasicBlock &MBB,
46 MachineBasicBlock::iterator I,
47 unsigned DestReg, unsigned SrcReg,
48 const TargetRegisterClass *DestRC,
49 const TargetRegisterClass *SrcRC) const;
50
51
52
53
54 bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
55 MachineBasicBlock::iterator MI,
56 const std::vector<CalleeSavedInfo> &CSI) const;
57 bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
58 MachineBasicBlock::iterator MI,
59 const std::vector<CalleeSavedInfo> &CSI) const;
60
61 bool isMoveInstr(const MachineInstr &MI,
62 unsigned &SrcReg, unsigned &DstReg,
63 unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
64 unsigned isLoadFromStackSlot(const MachineInstr *MI,
65 int &FrameIndex) const;
66 unsigned isStoreToStackSlot(const MachineInstr *MI,
67 int &FrameIndex) const;
68
69 void storeRegToStackSlot(MachineBasicBlock &MBB,
70 MachineBasicBlock::iterator MBBI,
71 unsigned SrcReg, bool isKill, int FrameIndex,
72 const TargetRegisterClass *RC) const;
73
74 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
75 SmallVectorImpl<MachineOperand> &Addr,
76 const TargetRegisterClass *RC,
77 SmallVectorImpl<MachineInstr*> &NewMIs) const;
78
79 void loadRegFromStackSlot(MachineBasicBlock &MBB,
80 MachineBasicBlock::iterator MBBI,
81 unsigned DestReg, int FrameIndex,
82 const TargetRegisterClass *RC) const;
83
84 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
85 SmallVectorImpl<MachineOperand> &Addr,
86 const TargetRegisterClass *RC,
87 SmallVectorImpl<MachineInstr*> &NewMIs) const;
88
89 bool canFoldMemoryOperand(const MachineInstr *MI,
90 const SmallVectorImpl<unsigned> &Ops) const;
91
92 MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
93 MachineInstr* MI,
94 const SmallVectorImpl<unsigned> &Ops,
95 int FrameIndex) const;
96
97 MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
98 MachineInstr* MI,
99 const SmallVectorImpl<unsigned> &Ops,
100 MachineInstr* LoadMI) const {
101 return 0;
102 }
103
104
10544 };
10645 }
10746
3333 #include "llvm/Support/ErrorHandling.h"
3434 using namespace llvm;
3535
36 static cl::opt<bool>
37 Thumb2RegScavenging("enable-thumb2-reg-scavenging",
38 cl::Hidden,
39 cl::desc("Enable register scavenging on Thumb-2"));
40
4136 Thumb2RegisterInfo::Thumb2RegisterInfo(const ARMBaseInstrInfo &tii,
4237 const ARMSubtarget &sti)
4338 : ARMBaseRegisterInfo(tii, sti) {
5752 unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
5853
5954 BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci), DestReg)
60 .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg);
55 .addConstantPoolIndex(Idx).addImm((int64_t)ARMCC::AL).addReg(0);
6156 }
6257
63 const TargetRegisterClass*
64 Thumb2RegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, MVT VT) const {
65 if (isARMLowRegister(Reg))
66 return ARM::tGPRRegisterClass;
67 switch (Reg) {
68 default:
69 break;
70 case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
71 case ARM::R12: case ARM::SP: case ARM::LR: case ARM::PC:
72 return ARM::GPRRegisterClass;
73 }
74
75 return TargetRegisterInfo::getPhysicalRegisterRegClass(Reg, VT);
76 }
77
78 bool
79 Thumb2RegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
80 return Thumb2RegScavenging;
81 }
82
83 bool Thumb2RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
84 const MachineFrameInfo *FFI = MF.getFrameInfo();
85 unsigned CFSize = FFI->getMaxCallFrameSize();
86 // It's not always a good idea to include the call frame as part of the
87 // stack frame. ARM (especially Thumb) has small immediate offset to
88 // address the stack frame. So a large call frame can cause poor codegen
89 // and may even makes it impossible to scavenge a register.
90 if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
91 return false;
92
93 return !MF.getFrameInfo()->hasVarSizedObjects();
94 }
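The threshold above works out as follows (a worked calculation, assuming the imm8-scaled-by-4 addressing the comment refers to):

    // ((1 << 8) - 1) * 4 / 2  ==  255 * 4 / 2  ==  510 bytes
    // An 8-bit, word-scaled offset reaches 0..1020 bytes, so the call frame is
    // only folded into the fixed frame while it stays under half of that range.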
95
96 /// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
97 /// a destreg = basereg + immediate in Thumb code. Materialize the immediate
98 /// in a register using mov / mvn sequences or load the immediate from a
99 /// constpool entry.
100 static
101 void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
102 MachineBasicBlock::iterator &MBBI,
103 unsigned DestReg, unsigned BaseReg,
104 int NumBytes, bool CanChangeCC,
105 const TargetInstrInfo &TII,
106 const Thumb2RegisterInfo& MRI,
107 DebugLoc dl) {
108 bool isHigh = !isARMLowRegister(DestReg) ||
109 (BaseReg != 0 && !isARMLowRegister(BaseReg));
110 bool isSub = false;
111 // Subtract doesn't have high register version. Load the negative value
112 // if either base or dest register is a high register. Also, if do not
113 // issue sub as part of the sequence if condition register is to be
114 // preserved.
115 if (NumBytes < 0 && !isHigh && CanChangeCC) {
116 isSub = true;
117 NumBytes = -NumBytes;
118 }
119 unsigned LdReg = DestReg;
120 if (DestReg == ARM::SP) {
121 assert(BaseReg == ARM::SP && "Unexpected!");
122 LdReg = ARM::R3;
123 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
124 .addReg(ARM::R3, RegState::Kill);
125 }
126
127 if (NumBytes <= 255 && NumBytes >= 0)
128 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
129 else if (NumBytes < 0 && NumBytes >= -255) {
130 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
131 BuildMI(MBB, MBBI, dl, TII.get(ARM::tNEG), LdReg)
132 .addReg(LdReg, RegState::Kill);
133 } else
134 MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, NumBytes);
135
136 // Emit add / sub.
137 int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
138 const MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl,
139 TII.get(Opc), DestReg);
140 if (DestReg == ARM::SP || isSub)
141 MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
142 else
143 MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
144 if (DestReg == ARM::SP)
145 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
146 .addReg(ARM::R12, RegState::Kill);
147 }
148
149 /// calcNumMI - Returns the number of instructions required to materialize
150 /// the specific add / sub r, c instruction.
151 static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
152 unsigned NumBits, unsigned Scale) {
153 unsigned NumMIs = 0;
154 unsigned Chunk = ((1 << NumBits) - 1) * Scale;
155
156 if (Opc == ARM::tADDrSPi) {
157 unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
158 Bytes -= ThisVal;
159 NumMIs++;
160 NumBits = 8;
161 Scale = 1; // Followed by a number of tADDi8.
162 Chunk = ((1 << NumBits) - 1) * Scale;
163 }
164
165 NumMIs += Bytes / Chunk;
166 if ((Bytes % Chunk) != 0)
167 NumMIs++;
168 if (ExtraOpc)
169 NumMIs++;
170 return NumMIs;
171 }
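A worked example of calcNumMI for the tADDrSPi path, assuming Bytes = 1100, NumBits = 8, Scale = 4:

    // Chunk = ((1 << 8) - 1) * 4 = 1020
    //   tADDrSPi covers min(1100, 1020) = 1020 bytes; 80 bytes remain; NumMIs = 1
    //   the follow-on adds use NumBits = 8, Scale = 1, so Chunk = 255
    //   80 / 255 = 0 full chunks, and 80 % 255 != 0 adds one tADDi8; NumMIs = 2
    // Plus one more instruction if ExtraOpc is set.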
172
173 /// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
174 /// a destreg = basereg + immediate in Thumb code.
175 static
176 void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
177 MachineBasicBlock::iterator &MBBI,
178 unsigned DestReg, unsigned BaseReg,
179 int NumBytes, const TargetInstrInfo &TII,
180 const Thumb2RegisterInfo& MRI,
181 DebugLoc dl) {
182 bool isSub = NumBytes < 0;
183 unsigned Bytes = (unsigned)NumBytes;
184 if (isSub) Bytes = -NumBytes;
185 bool isMul4 = (Bytes & 3) == 0;
186 bool isTwoAddr = false;
187 bool DstNotEqBase = false;
188 unsigned NumBits = 1;
189 unsigned Scale = 1;
190 int Opc = 0;
191 int ExtraOpc = 0;
192
193 if (DestReg == BaseReg && BaseReg == ARM::SP) {
194 assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
195 NumBits = 7;
196 Scale = 4;
197 Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
198 isTwoAddr = true;
199 } else if (!isSub && BaseReg == ARM::SP) {
200 // r1 = add sp, 403
201 // =>
202 // r1 = add sp, 100 * 4
203 // r1 = add r1, 3
204 if (!isMul4) {
205 Bytes &= ~3;
206 ExtraOpc = ARM::tADDi3;
207 }
208 NumBits = 8;
209 Scale = 4;
210 Opc = ARM::tADDrSPi;
211 } else {
212 // sp = sub sp, c
213 // r1 = sub sp, c
214 // r8 = sub sp, c
215 if (DestReg != BaseReg)
216 DstNotEqBase = true;
217 NumBits = 8;
218 Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
219 isTwoAddr = true;
220 }
221
222 unsigned NumMIs = calcNumMI(Opc, ExtraOpc, Bytes, NumBits, Scale);
223 unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
224 if (NumMIs > Threshold) {
225 // This will expand into too many instructions. Load the immediate from a
226 // constpool entry.
227 emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII,
228 MRI, dl);
229 return;
230 }
231
232 if (DstNotEqBase) {
233 if (isARMLowRegister(DestReg) && isARMLowRegister(BaseReg)) {
234 // If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
235 unsigned Chunk = (1 << 3) - 1;
236 unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
237 Bytes -= ThisVal;
238 BuildMI(MBB, MBBI, dl,TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3), DestReg)
239 .addReg(BaseReg, RegState::Kill).addImm(ThisVal);
240 } else {
241 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
242 .addReg(BaseReg, RegState::Kill);
243 }
244 BaseReg = DestReg;
245 }
246
247 unsigned Chunk = ((1 << NumBits) - 1) * Scale;
248 while (Bytes) {
249 unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
250 Bytes -= ThisVal;
251 ThisVal /= Scale;
252 // Build the new tADD / tSUB.
253 if (isTwoAddr)
254 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
255 .addReg(DestReg).addImm(ThisVal);
256 else {
257 bool isKill = BaseReg != ARM::SP;
258 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
259 .addReg(BaseReg, getKillRegState(isKill)).addImm(ThisVal);
260 BaseReg = DestReg;
261
262 if (Opc == ARM::tADDrSPi) {
263 // r4 = add sp, imm
264 // r4 = add r4, imm
265 // ...
266 NumBits = 8;
267 Scale = 1;
268 Chunk = ((1 << NumBits) - 1) * Scale;
269 Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
270 isTwoAddr = true;
271 }
272 }
273 }
274
275 if (ExtraOpc)
276 BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg)
277 .addReg(DestReg, RegState::Kill)
278 .addImm(((unsigned)NumBytes) & 3);
279 }
280
281 static void emitSPUpdate(MachineBasicBlock &MBB,
282 MachineBasicBlock::iterator &MBBI,
283 const TargetInstrInfo &TII, DebugLoc dl,
284 const Thumb2RegisterInfo &MRI,
285 int NumBytes) {
286 emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII,
287 MRI, dl);
288 }
289
290 void Thumb2RegisterInfo::
291 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
292 MachineBasicBlock::iterator I) const {
293 if (!hasReservedCallFrame(MF)) {
294 // If we have alloca, convert as follows:
295 // ADJCALLSTACKDOWN -> sub, sp, sp, amount
296 // ADJCALLSTACKUP -> add, sp, sp, amount
297 MachineInstr *Old = I;
298 DebugLoc dl = Old->getDebugLoc();
299 unsigned Amount = Old->getOperand(0).getImm();
300 if (Amount != 0) {
301 // We need to keep the stack aligned properly. To do this, we round the
302 // amount of space needed for the outgoing arguments up to the next
303 // alignment boundary.
304 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
305 Amount = (Amount+Align-1)/Align*Align;
306
307 // Replace the pseudo instruction with a new instruction...
308 unsigned Opc = Old->getOpcode();
309 if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
310 emitSPUpdate(MBB, I, TII, dl, *this, -Amount);
311 } else {
312 assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
313 emitSPUpdate(MBB, I, TII, dl, *this, Amount);
314 }
315 }
316 }
317 MBB.erase(I);
318 }
319
320 /// emitThumbConstant - Emit a series of instructions to materialize a
321 /// constant.
322 static void emitThumbConstant(MachineBasicBlock &MBB,
323 MachineBasicBlock::iterator &MBBI,
324 unsigned DestReg, int Imm,
325 const TargetInstrInfo &TII,
326 const Thumb2RegisterInfo& MRI,
327 DebugLoc dl) {
328 bool isSub = Imm < 0;
329 if (isSub) Imm = -Imm;
330
331 int Chunk = (1 << 8) - 1;
332 int ThisVal = (Imm > Chunk) ? Chunk : Imm;
333 Imm -= ThisVal;
334 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), DestReg).addImm(ThisVal);
335 if (Imm > 0)
336 emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII, MRI, dl);
337 if (isSub)
338 BuildMI(MBB, MBBI, dl, TII.get(ARM::tNEG), DestReg)
339 .addReg(DestReg, RegState::Kill);
340 }
341
342 void Thumb2RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
343 int SPAdj, RegScavenger *RS) const{
344 unsigned i = 0;
345 MachineInstr &MI = *II;
346 MachineBasicBlock &MBB = *MI.getParent();
347 MachineFunction &MF = *MBB.getParent();
348 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
349 DebugLoc dl = MI.getDebugLoc();
350
351 while (!MI.getOperand(i).isFI()) {
352 ++i;
353 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
354 }
355
356 unsigned FrameReg = ARM::SP;
357 int FrameIndex = MI.getOperand(i).getIndex();
358 int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
359 MF.getFrameInfo()->getStackSize() + SPAdj;
360
361 if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
362 Offset -= AFI->getGPRCalleeSavedArea1Offset();
363 else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
364 Offset -= AFI->getGPRCalleeSavedArea2Offset();
365 else if (hasFP(MF)) {
366 assert(SPAdj == 0 && "Unexpected");
367 // There is alloca()'s in this function, must reference off the frame
368 // pointer instead.
369 FrameReg = getFrameRegister(MF);
370 Offset -= AFI->getFramePtrSpillOffset();
371 }
372
373 unsigned Opcode = MI.getOpcode();
374 const TargetInstrDesc &Desc = MI.getDesc();
375 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
376
377 if (Opcode == ARM::tADDrSPi) {
378 Offset += MI.getOperand(i+1).getImm();
379
380 // Can't use tADDrSPi if it's based off the frame pointer.
381 unsigned NumBits = 0;
382 unsigned Scale = 1;
383 if (FrameReg != ARM::SP) {
384 Opcode = ARM::tADDi3;
385 MI.setDesc(TII.get(ARM::tADDi3));
386 NumBits = 3;
387 } else {
388 NumBits = 8;
389 Scale = 4;
390 assert((Offset & 3) == 0 &&
391 "Thumb add/sub sp, #imm immediate must be multiple of 4!");
392 }
393
394 if (Offset == 0) {
395 // Turn it into a move.
396 MI.setDesc(TII.get(ARM::tMOVhir2lor));
397 MI.getOperand(i).ChangeToRegister(FrameReg, false);
398 MI.RemoveOperand(i+1);
399 return;
400 }
401
402 // Common case: small offset, fits into instruction.
403 unsigned Mask = (1 << NumBits) - 1;
404 if (((Offset / Scale) & ~Mask) == 0) {
405 // Replace the FrameIndex with sp / fp
406 MI.getOperand(i).ChangeToRegister(FrameReg, false);
407 MI.getOperand(i+1).ChangeToImmediate(Offset / Scale);
408 return;
409 }
410
411 unsigned DestReg = MI.getOperand(0).getReg();
412 unsigned Bytes = (Offset > 0) ? Offset : -Offset;
413 unsigned NumMIs = calcNumMI(Opcode, 0, Bytes, NumBits, Scale);
414 // MI would expand into a large number of instructions. Don't try to
415 // simplify the immediate.
416 if (NumMIs > 2) {
417 emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII,
418 *this, dl);
419 MBB.erase(II);
420 return;
421 }
422
423 if (Offset > 0) {
424 // Translate r0 = add sp, imm to
425 // r0 = add sp, 255*4
426 // r0 = add r0, (imm - 255*4)
427 MI.getOperand(i).ChangeToRegister(FrameReg, false);
428 MI.getOperand(i+1).ChangeToImmediate(Mask);
429 Offset = (Offset - Mask * Scale);
430 MachineBasicBlock::iterator NII = next(II);
431 emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII,
432 *this, dl);
433 } else {
434 // Translate r0 = add sp, -imm to
435 // r0 = -imm (this is then translated into a series of instructons)
436 // r0 = add r0, sp
437 emitThumbConstant(MBB, II, DestReg, Offset, TII, *this, dl);
438 MI.setDesc(TII.get(ARM::tADDhirr));
439 MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
440 MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
441 }
442 return;
443 } else {
444 unsigned ImmIdx = 0;
445 int InstrOffs = 0;
446 unsigned NumBits = 0;
447 unsigned Scale = 1;
448 switch (AddrMode) {
449 case ARMII::AddrModeT1_s: {
450 ImmIdx = i+1;
451 InstrOffs = MI.getOperand(ImmIdx).getImm();
452 NumBits = (FrameReg == ARM::SP) ? 8 : 5;
453 Scale = 4;
454 break;
455 }
456 default:
457 LLVM_UNREACHABLE("Unsupported addressing mode!");
458 }
459
460 Offset += InstrOffs * Scale;
461 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
462
463 // Common case: small offset, fits into instruction.
464 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
465 int ImmedOffset = Offset / Scale;
466 unsigned Mask = (1 << NumBits) - 1;
467 if ((unsigned)Offset <= Mask * Scale) {
468 // Replace the FrameIndex with sp
469 MI.getOperand(i).ChangeToRegister(FrameReg, false);
470 ImmOp.ChangeToImmediate(ImmedOffset);
471 return;
472 }
473
474 bool isThumSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
475 if (AddrMode == ARMII::AddrModeT1_s) {
476 // Thumb tLDRspi, tSTRspi. These will change to instructions that use
477 // a different base register.
478 NumBits = 5;
479 Mask = (1 << NumBits) - 1;
480 }
481 // If this is a thumb spill / restore, we will be using a constpool load to
482 // materialize the offset.
483 if (AddrMode == ARMII::AddrModeT1_s && isThumSpillRestore)
484 ImmOp.ChangeToImmediate(0);
485 else {
486 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
487 ImmedOffset = ImmedOffset & Mask;
488 ImmOp.ChangeToImmediate(ImmedOffset);
489 Offset &= ~(Mask*Scale);
490 }
491 }
492
493 // If we get here, the immediate doesn't fit into the instruction. We folded
494 // as much as possible above, handle the rest, providing a register that is
495 // SP+LargeImm.
496 assert(Offset && "This code isn't needed if offset already handled!");
497
498 if (Desc.mayLoad()) {
499 // Use the destination register to materialize sp + offset.
500 unsigned TmpReg = MI.getOperand(0).getReg();
501 bool UseRR = false;
502 if (Opcode == ARM::tRestore) {
503 if (FrameReg == ARM::SP)
504 emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
505 Offset, false, TII, *this, dl);
506 else {
507 emitLoadConstPool(MBB, II, dl, TmpReg, Offset);
508 UseRR = true;
509 }
510 } else
511 emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
512 *this, dl);
513 MI.setDesc(TII.get(ARM::tLDR));
514 MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
515 if (UseRR)
516 // Use [reg, reg] addrmode.
517 MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
518 else // tLDR has an extra register operand.
519 MI.addOperand(MachineOperand::CreateReg(0, false));
520 } else if (Desc.mayStore()) {
521 // FIXME! This is horrific!!! We need register scavenging.
522 // Our temporary workaround has marked r3 unavailable. Of course, r3 is
523 // also a ABI register so it's possible that is is the register that is
524 // being storing here. If that's the case, we do the following:
525 // r12 = r2
526 // Use r2 to materialize sp + offset
527 // str r3, r2
528 // r2 = r12
529 unsigned ValReg = MI.getOperand(0).getReg();
530 unsigned TmpReg = ARM::R3;
531 bool UseRR = false;
532 if (ValReg == ARM::R3) {
533 BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
534 .addReg(ARM::R2, RegState::Kill);
535 TmpReg = ARM::R2;
536 }
537 if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
538 BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
539 .addReg(ARM::R3, RegState::Kill);
540 if (Opcode == ARM::tSpill) {
541 if (FrameReg == ARM::SP)
542 emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
543 Offset, false, TII, *this, dl);
544 else {
545 emitLoadConstPool(MBB, II, dl, TmpReg, Offset);
546 UseRR = true;
547 }
548 } else
549 emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
550 *this, dl);
551 MI.setDesc(TII.get(ARM::tSTR));
552 MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
553 if (UseRR) // Use [reg, reg] addrmode.
554 MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
555 else // tSTR has an extra register operand.
556 MI.addOperand(MachineOperand::CreateReg(0, false));
557
558 MachineBasicBlock::iterator NII = next(II);
559 if (ValReg == ARM::R3)
560 BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R2)
561 .addReg(ARM::R12, RegState::Kill);
562 if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
563 BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
564 .addReg(ARM::R12, RegState::Kill);
565 } else
566 assert(false && "Unexpected opcode!");
567 }
568
569 void Thumb2RegisterInfo::emitPrologue(MachineFunction &MF) const {
570 MachineBasicBlock &MBB = MF.front();
571 MachineBasicBlock::iterator MBBI = MBB.begin();
572 MachineFrameInfo *MFI = MF.getFrameInfo();
573 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
574 unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
575 unsigned NumBytes = MFI->getStackSize();
576 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
577 DebugLoc dl = (MBBI != MBB.end() ?
578 MBBI->getDebugLoc() : DebugLoc::getUnknownLoc());
579
580 // Check if R3 is live in. It might have to be used as a scratch register.
581 for (MachineRegisterInfo::livein_iterator I =MF.getRegInfo().livein_begin(),
582 E = MF.getRegInfo().livein_end(); I != E; ++I) {
583 if (I->first == ARM::R3) {
584 AFI->setR3IsLiveIn(true);
585 break;
586 }
587 }
588
589 // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4.
590 NumBytes = (NumBytes + 3) & ~3;
591 MFI->setStackSize(NumBytes);
592
593 // Determine the sizes of each callee-save spill areas and record which frame
594 // belongs to which callee-save spill areas.
595 unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
596 int FramePtrSpillFI = 0;
597
598 if (VARegSaveSize)
599 emitSPUpdate(MBB, MBBI, TII, dl, *this, -VARegSaveSize);
600
601 if (!AFI->hasStackFrame()) {
602 if (NumBytes != 0)
603 emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
604 return;
605 }
606
607 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
608 unsigned Reg = CSI[i].getReg();
609 int FI = CSI[i].getFrameIdx();
610 switch (Reg) {
611 case ARM::R4:
612 case ARM::R5:
613 case ARM::R6:
614 case ARM::R7:
615 case ARM::LR:
616 if (Reg == FramePtr)
617 FramePtrSpillFI = FI;
618 AFI->addGPRCalleeSavedArea1Frame(FI);
619 GPRCS1Size += 4;
620 break;
621 case ARM::R8:
622 case ARM::R9:
623 case ARM::R10:
624 case ARM::R11:
625 if (Reg == FramePtr)
626 FramePtrSpillFI = FI;
627 if (STI.isTargetDarwin()) {
628 AFI->addGPRCalleeSavedArea2Frame(FI);
629 GPRCS2Size += 4;
630 } else {
631 AFI->addGPRCalleeSavedArea1Frame(FI);
632 GPRCS1Size += 4;
633 }
634 break;
635 default:
636 AFI->addDPRCalleeSavedAreaFrame(FI);
637 DPRCSSize += 8;
638 }
639 }
640
641 if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
642 ++MBBI;
643 if (MBBI != MBB.end())
644 dl = MBBI->getDebugLoc();
645 }
646
647 // Darwin ABI requires FP to point to the stack slot that contains the
648 // previous FP.
649 if (STI.isTargetDarwin() || hasFP(MF)) {
650 MachineInstrBuilder MIB =
651 BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
652 .addFrameIndex(FramePtrSpillFI).addImm(0);
653 }
654
655 // Determine starting offsets of spill areas.
656 unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
657 unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
658 unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
659 AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
660 AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
661 AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
662 AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
663
664 NumBytes = DPRCSOffset;
665 if (NumBytes) {
666 // Insert it after all the callee-save spills.
667 emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
668 }
669
670 if (STI.isTargetELF() && hasFP(MF)) {
671 MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
672 AFI->getFramePtrSpillOffset());
673 }
674
675 AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
676 AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
677 AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
678 }
679
680 static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
681 for (unsigned i = 0; CSRegs[i]; ++i)
682 if (Reg == CSRegs[i])
683 return true;
58 bool Thumb2RegisterInfo::
59 requiresRegisterScavenging(const MachineFunction &MF) const {
60 // FIXME
68461 return false;
68562 }
686
687 static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
688 return (MI->getOpcode() == ARM::tRestore &&
689 MI->getOperand(1).isFI() &&
690 isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
691 }
692
693 void Thumb2RegisterInfo::emitEpilogue(MachineFunction &MF,
694 MachineBasicBlock &MBB) const {
695 MachineBasicBlock::iterator MBBI = prior(MBB.end());
696 assert((MBBI->getOpcode() == ARM::tBX_RET ||
697 MBBI->getOpcode() == ARM::tPOP_RET) &&
698 "Can only insert epilog into returning blocks");
699 DebugLoc dl = MBBI->getDebugLoc();
700 MachineFrameInfo *MFI = MF.getFrameInfo();
701 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
702 unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
703 int NumBytes = (int)MFI->getStackSize();
704
705 if (!AFI->hasStackFrame()) {
706 if (NumBytes != 0)
707 emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
708 } else {
709 // Unwind MBBI to point to first LDR / FLDD.
710 const unsigned *CSRegs = getCalleeSavedRegs();
711 if (MBBI != MBB.begin()) {
712 do
713 --MBBI;
714 while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
715 if (!isCSRestore(MBBI, CSRegs))
716 ++MBBI;
717 }
718
719 // Move SP to start of FP callee save spill area.
720 NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
721 AFI->getGPRCalleeSavedArea2Size() +
722 AFI->getDPRCalleeSavedAreaSize());
723
724 if (hasFP(MF)) {
725 NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
726 // Reset SP based on frame pointer only if the stack frame extends beyond
727 // frame pointer stack slot or target is ELF and the function has FP.
728 if (NumBytes)
729 emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
730 TII, *this, dl);
731 else
732 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::SP)
733 .addReg(FramePtr);
734 } else {
735 if (MBBI->getOpcode() == ARM::tBX_RET &&
736 &MBB.front() != MBBI &&
737 prior(MBBI)->getOpcode() == ARM::tPOP) {
738 MachineBasicBlock::iterator PMBBI = prior(MBBI);
739 emitSPUpdate(MBB, PMBBI, TII, dl, *this, NumBytes);
740 } else
741 emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
742 }
743 }
744
745 if (VARegSaveSize) {
746 // Epilogue for vararg functions: pop LR to R3 and branch off it.
747 // FIXME: Verify this is still ok when R3 is no longer being reserved.
748 BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)).addReg(ARM::R3);
749
750 emitSPUpdate(MBB, MBBI, TII, dl, *this, VARegSaveSize);
751
752 BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg)).addReg(ARM::R3);
753 MBB.erase(MBBI);
754 }
755 }
3535 ARMCC::CondCodes Pred = ARMCC::AL,
3636 unsigned PredReg = 0) const;
3737
38 /// Code Generation virtual methods...
39 const TargetRegisterClass *
40 getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const;
41
4238 bool requiresRegisterScavenging(const MachineFunction &MF) const;
43
44 bool hasReservedCallFrame(MachineFunction &MF) const;
45
46 void eliminateCallFramePseudoInstr(MachineFunction &MF,
47 MachineBasicBlock &MBB,
48 MachineBasicBlock::iterator I) const;
49
50 void eliminateFrameIndex(MachineBasicBlock::iterator II,
51 int SPAdj, RegScavenger *RS = NULL) const;
52
53 void emitPrologue(MachineFunction &MF) const;
54 void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
5539 };
5640 }
5741
None ; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep smmul | count 1
0 ; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep smull | count 1
11 ; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep umull | count 1
22
33 define i32 @smulhi(i32 %x, i32 %y) {
None ; XFAIL: *
1 ; fixme
2 ; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mov | count 3
31 ; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mvn | count 1
42