llvm.org GIT mirror llvm / fc989e1
Reduce the number of arguments in the instruction builder and make some improvements on instruction selection that account for register and frame index bases. Patch contributed by Jeff Cohen. Thanks Jeff! git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@16110 91177308-0d34-0410-b5e6-96231b3b80d8 Reid Spencer 16 years ago
2 changed file(s) with 138 addition(s) and 161 deletion(s). Raw diff Collapse all Expand all
236236
237237 /// getAddressingMode - Get the addressing mode to use to address the
238238 /// specified value. The returned value should be used with addFullAddress.
239 void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
240 unsigned &IndexReg, unsigned &Disp);
239 void getAddressingMode(Value *Addr, X86AddressMode &AM);
241240
242241
243242 /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
244243 /// expressions.
245244 void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
246245 std::vector<Value*> &GEPOps,
247 std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
248 unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
246 std::vector<const Type*> &GEPTypes,
247 X86AddressMode &AM);
249248
250249 /// isGEPFoldable - Return true if the specified GEP can be completely
251250 /// folded into the addressing mode of a load/store or lea instruction.
252251 bool isGEPFoldable(MachineBasicBlock *MBB,
253252 Value *Src, User::op_iterator IdxBegin,
254 User::op_iterator IdxEnd, unsigned &BaseReg,
255 unsigned &Scale, unsigned &IndexReg, unsigned &Disp);
253 User::op_iterator IdxEnd, X86AddressMode &AM);
256254
257255 /// emitGEPOperation - Common code shared between visitGetElementPtrInst and
258256 /// constant expression GEP support.
811809 }
812810
813811
814 void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
815 unsigned &IndexReg, unsigned &Disp) {
816 BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
812 void ISel::getAddressingMode(Value *Addr, X86AddressMode &AM) {
813 AM.BaseType = X86AddressMode::RegBase;
814 AM.Base.Reg = 0; AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
817815 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
818816 if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
819 BaseReg, Scale, IndexReg, Disp))
817 AM))
820818 return;
821819 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
822820 if (CE->getOpcode() == Instruction::GetElementPtr)
823821 if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
824 BaseReg, Scale, IndexReg, Disp))
822 AM))
825823 return;
824 } else if (AllocaInst *AI = dyn_castFixedAlloca(Addr)) {
825 AM.BaseType = X86AddressMode::FrameIndexBase;
826 AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
827 return;
826828 }
827829
828830 // If it's not foldable, reset addr mode.
829 BaseReg = getReg(Addr);
830 Scale = 1; IndexReg = 0; Disp = 0;
831 AM.BaseType = X86AddressMode::RegBase;
832 AM.Base.Reg = getReg(Addr);
833 AM.Scale = 1; AM.IndexReg = 0; AM.Disp = 0;
831834 }
832835
833836 // canFoldSetCCIntoBranchOrSelect - Return the setcc instruction if we can fold
19941997 addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), FI);
19951998
19961999 } else {
1997 unsigned BaseReg, Scale, IndexReg, Disp;
1998 getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), BaseReg,
1999 Scale, IndexReg, Disp);
2000 X86AddressMode AM;
2001 getAddressingMode(cast<LoadInst>(Op1)->getOperand(0), AM);
20002002
2001 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r),
2002 BaseReg, Scale, IndexReg, Disp);
2003 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op0r), AM);
20032004 }
20042005 return;
20052006 }
20192020 unsigned FI = getFixedSizedAllocaFI(AI);
20202021 addFrameReference(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), FI);
20212022 } else {
2022 unsigned BaseReg, Scale, IndexReg, Disp;
2023 getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), BaseReg,
2024 Scale, IndexReg, Disp);
2023 X86AddressMode AM;
2024 getAddressingMode(cast<LoadInst>(Op0)->getOperand(0), AM);
20252025
2026 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r),
2027 BaseReg, Scale, IndexReg, Disp);
2026 addFullAddress(BuildMI(BB, Opcode, 5, DestReg).addReg(Op1r), AM);
20282027 }
20292028 return;
20302029 }
23512350 case 5:
23522351 case 9:
23532352 if (Class == cInt) {
2354 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg),
2355 op0Reg, ConstRHS-1, op0Reg, 0);
2353 X86AddressMode AM;
2354 AM.BaseType = X86AddressMode::RegBase;
2355 AM.Base.Reg = op0Reg;
2356 AM.Scale = ConstRHS-1;
2357 AM.IndexReg = op0Reg;
2358 AM.Disp = 0;
2359 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, DestReg), AM);
23562360 return;
23572361 }
23582362 case -3:
23602364 case -9:
23612365 if (Class == cInt) {
23622366 TmpReg = makeAnotherReg(DestTy);
2363 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg),
2364 op0Reg, -ConstRHS-1, op0Reg, 0);
2367 X86AddressMode AM;
2368 AM.BaseType = X86AddressMode::RegBase;
2369 AM.Base.Reg = op0Reg;
2370 AM.Scale = -ConstRHS-1;
2371 AM.IndexReg = op0Reg;
2372 AM.Disp = 0;
2373 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TmpReg), AM);
23652374 BuildMI(*MBB, IP, NEGrTab[Class], 1, DestReg).addReg(TmpReg);
23662375 return;
23672376 }
24432452 unsigned FI = getFixedSizedAllocaFI(AI);
24442453 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
24452454 } else {
2446 unsigned BaseReg, Scale, IndexReg, Disp;
2447 getAddressingMode(LI->getOperand(0), BaseReg,
2448 Scale, IndexReg, Disp);
2455 X86AddressMode AM;
2456 getAddressingMode(LI->getOperand(0), AM);
24492457
2450 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r),
2451 BaseReg, Scale, IndexReg, Disp);
2458 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
24522459 }
24532460 return;
24542461 }
25872594 unsigned FI = getFixedSizedAllocaFI(AI);
25882595 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), FI);
25892596 } else {
2590 unsigned BaseReg, Scale, IndexReg, Disp;
2591 getAddressingMode(LI->getOperand(0), BaseReg,
2592 Scale, IndexReg, Disp);
2597 X86AddressMode AM;
2598 getAddressingMode(LI->getOperand(0), AM);
25932599
2594 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r),
2595 BaseReg, Scale, IndexReg, Disp);
2600 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op0r), AM);
25962601 }
25972602 return;
25982603 }
26082613 unsigned FI = getFixedSizedAllocaFI(AI);
26092614 addFrameReference(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), FI);
26102615 } else {
2611 unsigned BaseReg, Scale, IndexReg, Disp;
2612 getAddressingMode(LI->getOperand(0), BaseReg, Scale, IndexReg, Disp);
2613 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r),
2614 BaseReg, Scale, IndexReg, Disp);
2616 X86AddressMode AM;
2617 getAddressingMode(LI->getOperand(0), AM);
2618 addFullAddress(BuildMI(BB, Opcode, 5, ResultReg).addReg(Op1r), AM);
26152619 }
26162620 return;
26172621 }
29372941 unsigned FI = getFixedSizedAllocaFI(AI);
29382942 addFrameReference(BuildMI(BB, Opcode[Class], 4, DestReg), FI);
29392943 } else {
2940 unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
2941 getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
2942 addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg),
2943 BaseReg, Scale, IndexReg, Disp);
2944 X86AddressMode AM;
2945 getAddressingMode(I.getOperand(0), AM);
2946 addFullAddress(BuildMI(BB, Opcode[Class], 4, DestReg), AM);
29442947 }
29452948 return;
29462949 } else {
30103013 addFrameReference(BuildMI(BB, Opcode, 4, DestReg), FI);
30113014 }
30123015 } else {
3013 unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
3014 getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
3016 X86AddressMode AM;
3017 getAddressingMode(I.getOperand(0), AM);
30153018
30163019 if (Class == cLong) {
3017 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
3018 BaseReg, Scale, IndexReg, Disp);
3019 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1),
3020 BaseReg, Scale, IndexReg, Disp+4);
3020 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg), AM);
3021 AM.Disp += 4;
3022 addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg+1), AM);
30213023 } else {
3022 addFullAddress(BuildMI(BB, Opcode, 4, DestReg),
3023 BaseReg, Scale, IndexReg, Disp);
3024 addFullAddress(BuildMI(BB, Opcode, 4, DestReg), AM);
30243025 }
30253026 }
30263027 }
30293030 /// instruction.
30303031 ///
30313032 void ISel::visitStoreInst(StoreInst &I) {
3032 unsigned BaseReg = ~0U, Scale = ~0U, IndexReg = ~0U, Disp = ~0U;
3033 unsigned AllocaFrameIdx = ~0U;
3034
3035 if (AllocaInst *AI = dyn_castFixedAlloca(I.getOperand(1)))
3036 AllocaFrameIdx = getFixedSizedAllocaFI(AI);
3037 else
3038 getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
3033 X86AddressMode AM;
3034 getAddressingMode(I.getOperand(1), AM);
30393035
30403036 const Type *ValTy = I.getOperand(0)->getType();
30413037 unsigned Class = getClassB(ValTy);
30433039 if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(0))) {
30443040 uint64_t Val = CI->getRawValue();
30453041 if (Class == cLong) {
3046 if (AllocaFrameIdx != ~0U) {
3047 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3048 AllocaFrameIdx).addImm(Val & ~0U);
3049 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3050 AllocaFrameIdx, 4).addImm(Val>>32);
3051 } else {
3052 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3053 BaseReg, Scale, IndexReg, Disp).addImm(Val & ~0U);
3054 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3055 BaseReg, Scale, IndexReg, Disp+4).addImm(Val>>32);
3056 }
3042 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val & ~0U);
3043 AM.Disp += 4;
3044 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(Val>>32);
30573045 } else {
30583046 static const unsigned Opcodes[] = {
30593047 X86::MOV8mi, X86::MOV16mi, X86::MOV32mi
30603048 };
30613049 unsigned Opcode = Opcodes[Class];
3062 if (AllocaFrameIdx != ~0U)
3063 addFrameReference(BuildMI(BB, Opcode, 5), AllocaFrameIdx).addImm(Val);
3064 else
3065 addFullAddress(BuildMI(BB, Opcode, 5),
3066 BaseReg, Scale, IndexReg, Disp).addImm(Val);
3050 addFullAddress(BuildMI(BB, Opcode, 5), AM).addImm(Val);
30673051 }
30683052 } else if (isa<ConstantPointerNull>(I.getOperand(0))) {
3069 if (AllocaFrameIdx != ~0U)
3070 addFrameReference(BuildMI(BB, X86::MOV32mi, 5), AllocaFrameIdx).addImm(0);
3071 else
3072 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3073 BaseReg, Scale, IndexReg, Disp).addImm(0);
3074
3053 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(0);
30753054 } else if (ConstantBool *CB = dyn_cast<ConstantBool>(I.getOperand(0))) {
3076 if (AllocaFrameIdx != ~0U)
3077 addFrameReference(BuildMI(BB, X86::MOV8mi, 5),
3078 AllocaFrameIdx).addImm(CB->getValue());
3079 else
3080 addFullAddress(BuildMI(BB, X86::MOV8mi, 5),
3081 BaseReg, Scale, IndexReg, Disp).addImm(CB->getValue());
3055 addFullAddress(BuildMI(BB, X86::MOV8mi, 5), AM).addImm(CB->getValue());
30823056 } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(I.getOperand(0))) {
30833057 // Store constant FP values with integer instructions to avoid having to
30843058 // load the constants from the constant pool then do a store.
30883062 float F;
30893063 } V;
30903064 V.F = CFP->getValue();
3091 if (AllocaFrameIdx != ~0U)
3092 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3093 AllocaFrameIdx).addImm(V.I);
3094 else
3095 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3096 BaseReg, Scale, IndexReg, Disp).addImm(V.I);
3065 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(V.I);
30973066 } else {
30983067 union {
30993068 uint64_t I;
31003069 double F;
31013070 } V;
31023071 V.F = CFP->getValue();
3103 if (AllocaFrameIdx != ~0U) {
3104 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3105 AllocaFrameIdx).addImm((unsigned)V.I);
3106 addFrameReference(BuildMI(BB, X86::MOV32mi, 5),
3107 AllocaFrameIdx, 4).addImm(unsigned(V.I >> 32));
3108 } else {
3109 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3110 BaseReg, Scale, IndexReg, Disp).addImm((unsigned)V.I);
3111 addFullAddress(BuildMI(BB, X86::MOV32mi, 5),
3112 BaseReg, Scale, IndexReg, Disp+4).addImm(
3072 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm((unsigned)V.I);
3073 AM.Disp += 4;
3074 addFullAddress(BuildMI(BB, X86::MOV32mi, 5), AM).addImm(
31133075 unsigned(V.I >> 32));
3114 }
31153076 }
31163077
31173078 } else if (Class == cLong) {
31183079 unsigned ValReg = getReg(I.getOperand(0));
3119 if (AllocaFrameIdx != ~0U) {
3120 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
3121 AllocaFrameIdx).addReg(ValReg);
3122 addFrameReference(BuildMI(BB, X86::MOV32mr, 5),
3123 AllocaFrameIdx, 4).addReg(ValReg+1);
3124 } else {
3125 addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
3126 BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
3127 addFullAddress(BuildMI(BB, X86::MOV32mr, 5),
3128 BaseReg, Scale, IndexReg, Disp+4).addReg(ValReg+1);
3129 }
3080 addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg);
3081 AM.Disp += 4;
3082 addFullAddress(BuildMI(BB, X86::MOV32mr, 5), AM).addReg(ValReg+1);
31303083 } else {
31313084 unsigned ValReg = getReg(I.getOperand(0));
31323085 static const unsigned Opcodes[] = {
31353088 unsigned Opcode = Opcodes[Class];
31363089 if (ValTy == Type::DoubleTy) Opcode = X86::FST64m;
31373090
3138 if (AllocaFrameIdx != ~0U)
3139 addFrameReference(BuildMI(BB, Opcode, 5), AllocaFrameIdx).addReg(ValReg);
3140 else
3141 addFullAddress(BuildMI(BB, Opcode, 1+4),
3142 BaseReg, Scale, IndexReg, Disp).addReg(ValReg);
3091 addFullAddress(BuildMI(BB, Opcode, 1+4), AM).addReg(ValReg);
31433092 }
31443093 }
31453094
35383487 void ISel::visitGetElementPtrInst(GetElementPtrInst &I) {
35393488 // If this GEP instruction will be folded into all of its users, we don't need
35403489 // to explicitly calculate it!
3541 unsigned A, B, C, D;
3542 if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), A,B,C,D)) {
3490 X86AddressMode AM;
3491 if (isGEPFoldable(0, I.getOperand(0), I.op_begin()+1, I.op_end(), AM)) {
35433492 // Check all of the users of the instruction to see if they are loads and
35443493 // stores.
35453494 bool AllWillFold = true;
35743523 ///
35753524 void ISel::getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
35763525 std::vector<Value*> &GEPOps,
3577 std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
3578 unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
3526 std::vector<const Type*> &GEPTypes,
3527 X86AddressMode &AM) {
35793528 const TargetData &TD = TM.getTargetData();
35803529
35813530 // Clear out the state we are working with...
3582 BaseReg = 0; // No base register
3583 Scale = 1; // Unit scale
3584 IndexReg = 0; // No index register
3585 Disp = 0; // No displacement
3531 AM.BaseType = X86AddressMode::RegBase;
3532 AM.Base.Reg = 0; // No base register
3533 AM.Scale = 1; // Unit scale
3534 AM.IndexReg = 0; // No index register
3535 AM.Disp = 0; // No displacement
35863536
35873537 // While there are GEP indexes that can be folded into the current address,
35883538 // keep processing them.
35963546 // structure is in memory. Since the structure index must be constant, we
35973547 // can get its value and use it to find the right byte offset from the
35983548 // StructLayout class's list of structure member offsets.
3599 Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
3549 AM.Disp += TD.getStructLayout(StTy)->MemberOffsets[CUI->getValue()];
36003550 GEPOps.pop_back(); // Consume a GEP operand
36013551 GEPTypes.pop_back();
36023552 } else {
36113561 // If idx is a constant, fold it into the offset.
36123562 unsigned TypeSize = TD.getTypeSize(SqTy->getElementType());
36133563 if (ConstantSInt *CSI = dyn_cast<ConstantSInt>(idx)) {
3614 Disp += TypeSize*CSI->getValue();
3564 AM.Disp += TypeSize*CSI->getValue();
36153565 } else if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(idx)) {
3616 Disp += TypeSize*CUI->getValue();
3566 AM.Disp += TypeSize*CUI->getValue();
36173567 } else {
36183568 // If the index reg is already taken, we can't handle this index.
3619 if (IndexReg) return;
3569 if (AM.IndexReg) return;
36203570
36213571 // If this is a size that we can handle, then add the index as
36223572 switch (TypeSize) {
36233573 case 1: case 2: case 4: case 8:
36243574 // These are all acceptable scales on X86.
3625 Scale = TypeSize;
3575 AM.Scale = TypeSize;
36263576 break;
36273577 default:
36283578 // Otherwise, we can't handle this scale
36343584 CI->getOperand(0)->getType() == Type::UIntTy)
36353585 idx = CI->getOperand(0);
36363586
3637 IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
3587 AM.IndexReg = MBB ? getReg(idx, MBB, IP) : 1;
36383588 }
36393589
36403590 GEPOps.pop_back(); // Consume a GEP operand
36453595 // GEPTypes is empty, which means we have a single operand left. Set it as
36463596 // the base register.
36473597 //
3648 assert(BaseReg == 0);
3598 assert(AM.Base.Reg == 0);
3599
3600 if (AllocaInst *AI = dyn_castFixedAlloca(GEPOps.back())) {
3601 AM.BaseType = X86AddressMode::FrameIndexBase;
3602 AM.Base.FrameIndex = getFixedSizedAllocaFI(AI);
3603 GEPOps.pop_back();
3604 return;
3605 }
36493606
36503607 #if 0 // FIXME: TODO!
3651 if (AllocaInst *AI = dyn_castFixedAlloca(V)) {
3652 // FIXME: When we can add FrameIndex values as the first operand, we can
3653 // make GEP's of allocas MUCH more efficient!
3654 unsigned FI = getFixedSizedAllocaFI(AI);
3655 GEPOps.pop_back();
3656 return;
3657 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
3608 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
36583609 // FIXME: When addressing modes are more powerful/correct, we could load
36593610 // global addresses directly as 32-bit immediates.
36603611 }
36613612 #endif
36623613
3663 BaseReg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
3614 AM.Base.Reg = MBB ? getReg(GEPOps[0], MBB, IP) : 1;
36643615 GEPOps.pop_back(); // Consume the last GEP operand
36653616 }
36663617
36693620 /// folded into the addressing mode of a load/store or lea instruction.
36703621 bool ISel::isGEPFoldable(MachineBasicBlock *MBB,
36713622 Value *Src, User::op_iterator IdxBegin,
3672 User::op_iterator IdxEnd, unsigned &BaseReg,
3673 unsigned &Scale, unsigned &IndexReg, unsigned &Disp) {
3623 User::op_iterator IdxEnd, X86AddressMode &AM) {
36743624
36753625 std::vector<Value*> GEPOps;
36763626 GEPOps.resize(IdxEnd-IdxBegin+1);
36833633
36843634 MachineBasicBlock::iterator IP;
36853635 if (MBB) IP = MBB->end();
3686 getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
3636 getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
36873637
36883638 // We can fold it away iff the getGEPIndex call eliminated all operands.
36893639 return GEPOps.empty();
37223672 // Keep emitting instructions until we consume the entire GEP instruction.
37233673 while (!GEPOps.empty()) {
37243674 unsigned OldSize = GEPOps.size();
3725 unsigned BaseReg, Scale, IndexReg, Disp;
3726 getGEPIndex(MBB, IP, GEPOps, GEPTypes, BaseReg, Scale, IndexReg, Disp);
3675 X86AddressMode AM;
3676 getGEPIndex(MBB, IP, GEPOps, GEPTypes, AM);
37273677
37283678 if (GEPOps.size() != OldSize) {
37293679 // getGEPIndex consumed some of the input. Build an LEA instruction here.
37303680 unsigned NextTarget = 0;
37313681 if (!GEPOps.empty()) {
3732 assert(BaseReg == 0 &&
3682 assert(AM.Base.Reg == 0 &&
37333683 "getGEPIndex should have left the base register open for chaining!");
3734 NextTarget = BaseReg = makeAnotherReg(Type::UIntTy);
3684 NextTarget = AM.Base.Reg = makeAnotherReg(Type::UIntTy);
37353685 }
37363686
3737 if (IndexReg == 0 && Disp == 0)
3738 BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(BaseReg);
3687 if (AM.BaseType == X86AddressMode::RegBase &&
3688 AM.IndexReg == 0 && AM.Disp == 0)
3689 BuildMI(*MBB, IP, X86::MOV32rr, 1, TargetReg).addReg(AM.Base.Reg);
37393690 else
3740 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg),
3741 BaseReg, Scale, IndexReg, Disp);
3691 addFullAddress(BuildMI(*MBB, IP, X86::LEA32r, 5, TargetReg), AM);
37423692 --IP;
37433693 TargetReg = NextTarget;
37443694 } else if (GEPTypes.empty()) {
2727
2828 namespace llvm {
2929
30 /// X86AddressMode - This struct holds a generalized full x86 address mode.
31 /// The base register can be a frame index, which will eventually be replaced
32 /// with BP or SP and Disp being offsetted accordingly.
33 /// FIXME: add support for globals as a new base type.
34 struct X86AddressMode {
35 enum {
36 UnknownBase, // Default-constructed state; callers must pick a real base.
37 RegBase, // Base is a machine register (Base.Reg).
38 FrameIndexBase // Base is an abstract stack frame slot (Base.FrameIndex).
39 } BaseType;
40
41 union {
42 unsigned Reg; // Valid only when BaseType == RegBase; 0 means no base reg.
43 int FrameIndex; // Valid only when BaseType == FrameIndexBase.
44 } Base;
45
46 unsigned Scale; // Multiplier applied to IndexReg; addFullAddress asserts 1, 2, 4, or 8.
47 unsigned IndexReg; // Index register; 0 means no index register.
48 unsigned Disp; // Constant displacement added to the computed address.
49
50 X86AddressMode() : BaseType(UnknownBase) {} // Other fields are set by users before use.
51 };
52
3053 /// addDirectMem - This function is used to add a direct memory reference to the
3154 /// current instruction -- that is, a dereference of an address in a register,
3255 /// with no scale, index or displacement. An example is: DWORD PTR [EAX].
4972 }
5073
5174 inline const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
52 unsigned BaseReg,
53 unsigned Scale,
54 unsigned IndexReg,
55 unsigned Disp) {
56 assert (Scale == 1 || Scale == 2 || Scale == 4 || Scale == 8);
57 return MIB.addReg(BaseReg).addZImm(Scale).addReg(IndexReg).addSImm(Disp);
75 const X86AddressMode &AM) {
76 assert (AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);
77
78 if (AM.BaseType == X86AddressMode::RegBase)
79 MIB.addReg(AM.Base.Reg);
80 else if (AM.BaseType == X86AddressMode::FrameIndexBase)
81 MIB.addFrameIndex(AM.Base.FrameIndex);
82 else
83 assert (0);
84 return MIB.addZImm(AM.Scale).addReg(AM.IndexReg).addSImm(AM.Disp);
5885 }
5986
6087 /// addFrameReference - This function is used to add a reference to the base of