llvm.org GIT mirror: llvm @ 7ce4578
Matches MachineInstr changes.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@31712 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Evan Cheng
17 changed files, 123 additions and 120 deletions.
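The common thread in the hunks below: constructing a MachineInstr now requires a TargetInstrInfo, so the free-standing BuildMI helpers take a TII argument and each target threads a TII reference into its RegisterInfo. A minimal before/after sketch of one call site, lifted from the X86 prologue code further down (MI, MF, and NumBytes come from that surrounding function; the sketch itself is an illustration, not part of the commit):

    // Old form: opcode and operand count were enough.
    MI = BuildMI(X86::MOV32ri, 2, X86::EAX).addImm(NumBytes);

    // New form: a TargetInstrInfo reference comes first, typically fetched
    // from the enclosing MachineFunction.
    const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
    MI = BuildMI(TII, X86::MOV32ri, 2, X86::EAX).addImm(NumBytes);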
  #define LLVM_CODEGEN_MACHINEINSTRBUILDER_H

  #include "llvm/CodeGen/MachineBasicBlock.h"
+ #include "llvm/CodeGen/MachineFunction.h"
+ #include "llvm/Target/TargetMachine.h"

  namespace llvm {


  /// addReg - Add a new virtual register operand...
  ///
- const MachineInstrBuilder &addReg(int RegNo, bool isDef = false,
- bool isImp = false) const {
+ const
+ MachineInstrBuilder &addReg(int RegNo, bool isDef = false, bool isImp = false,
+ bool isKill = false, bool isDead = false) const {
  MI->addRegOperand(RegNo, isDef, isImp);
  return *this;
  }
  MI->addExternalSymbolOperand(FnName);
  return *this;
  }
-
- const MachineInstrBuilder &addImplicitDefsUses() const {
- MI->addImplicitDefUseOperands();
- return *this;
- }
  };

  /// BuildMI - Builder interface. Specify how to create the initial instruction
  /// itself. NumOperands is the number of operands to the machine instruction to
  /// allow for memory efficient representation of machine instructions.
  ///
- inline MachineInstrBuilder BuildMI(int Opcode, unsigned NumOperands) {
- return MachineInstrBuilder(new MachineInstr(Opcode, NumOperands));
+ inline MachineInstrBuilder BuildMI(const TargetInstrInfo &TII, int Opcode,
+ unsigned NumOperands) {
+ return MachineInstrBuilder(new MachineInstr(TII, Opcode, NumOperands));
  }

  /// BuildMI - This version of the builder sets up the first operand as a
  /// destination virtual register. NumOperands is the number of additional add*
  /// calls that are expected, not including the destination register.
  ///
- inline MachineInstrBuilder
- BuildMI(int Opcode, unsigned NumOperands, unsigned DestReg) {
- return MachineInstrBuilder(new MachineInstr(Opcode, NumOperands+1))
+ inline MachineInstrBuilder BuildMI(const TargetInstrInfo &TII, int Opcode,
+ unsigned NumOperands, unsigned DestReg) {
+ return MachineInstrBuilder(new MachineInstr(TII, Opcode, NumOperands+1))
  .addReg(DestReg, true);
  }

  MachineBasicBlock::iterator I,
  int Opcode, unsigned NumOperands,
  unsigned DestReg) {
- MachineInstr *MI = new MachineInstr(Opcode, NumOperands+1);
+ MachineInstr *MI = new MachineInstr(*BB.getParent()->getTarget().
+ getInstrInfo(), Opcode, NumOperands+1);
  BB.insert(I, MI);
  return MachineInstrBuilder(MI).addReg(DestReg, true);
  }
  inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
  MachineBasicBlock::iterator I,
  int Opcode, unsigned NumOperands) {
- MachineInstr *MI = new MachineInstr(Opcode, NumOperands);
+ MachineInstr *MI = new MachineInstr(*BB.getParent()->getTarget().
+ getInstrInfo(), Opcode, NumOperands);
  BB.insert(I, MI);
  return MachineInstrBuilder(MI);
  }
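Note, from the header hunks above, that the BuildMI overloads taking a MachineBasicBlock recover the TargetInstrInfo from the block's parent function themselves, so only the free-standing overloads grow the new first argument. A sketch of the two call styles (the opcode and register names are placeholders, not taken from this commit):

    // Free-standing form: the caller supplies the TargetInstrInfo explicitly.
    MachineInstr *MI = BuildMI(TII, Opcode, 1, DestReg).addReg(SrcReg);

    // Basic-block form: TII is found via BB.getParent()->getTarget().getInstrInfo(),
    // so these call sites are unchanged by the commit.
    BuildMI(BB, InsertPos, Opcode, 1, DestReg).addReg(SrcReg);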
  #endif

  // Create the new machine instruction.
- MachineInstr *MI = new MachineInstr(Opc, NumMIOperands);
+ MachineInstr *MI = new MachineInstr(*TII, Opc, NumMIOperands);

  // Add result register values for things that are defined by this
  // instruction.
  }
  }
  }
-
- // Emit implicit def / use operands.
- MI->addImplicitDefUseOperands();

  // Now that we have emitted all operands, emit this instruction itself.
  if ((II.Flags & M_USES_CUSTOM_DAG_SCHED_INSERTION) == 0) {
  using namespace llvm;

  AlphaInstrInfo::AlphaInstrInfo()
- : TargetInstrInfo(AlphaInsts, sizeof(AlphaInsts)/sizeof(AlphaInsts[0])) { }
+ : TargetInstrInfo(AlphaInsts, sizeof(AlphaInsts)/sizeof(AlphaInsts[0])),
+ RI(*this) { }


  bool AlphaInstrInfo::isMoveInstr(const MachineInstr& MI,
  return l - h * IMM_MULT;
  }

- AlphaRegisterInfo::AlphaRegisterInfo()
- : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP)
+ AlphaRegisterInfo::AlphaRegisterInfo(const TargetInstrInfo &tii)
+ : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP),
+ TII(tii)
  {
  }

  unsigned InReg = MI->getOperand(1).getReg();
  Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
  ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
- return BuildMI(Opc, 3).addReg(InReg).addFrameIndex(FrameIndex)
+ return BuildMI(TII, Opc, 3).addReg(InReg).addFrameIndex(FrameIndex)
  .addReg(Alpha::F31);
  } else { // load -> move
  unsigned OutReg = MI->getOperand(0).getReg();
  Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
  ((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
- return BuildMI(Opc, 2, OutReg).addFrameIndex(FrameIndex)
+ return BuildMI(TII, Opc, 2, OutReg).addFrameIndex(FrameIndex)
  .addReg(Alpha::F31);
  }
  }

  MachineInstr *New;
  if (Old->getOpcode() == Alpha::ADJUSTSTACKDOWN) {
- New=BuildMI(Alpha::LDA, 2, Alpha::R30)
+ New=BuildMI(TII, Alpha::LDA, 2, Alpha::R30)
  .addImm(-Amount).addReg(Alpha::R30);
  } else {
  assert(Old->getOpcode() == Alpha::ADJUSTSTACKUP);
- New=BuildMI(Alpha::LDA, 2, Alpha::R30)
+ New=BuildMI(TII, Alpha::LDA, 2, Alpha::R30)
  .addImm(Amount).addReg(Alpha::R30);
  }

  MI.getOperand(i + 1).ChangeToRegister(Alpha::R28, false);
  MI.getOperand(i).ChangeToImmediate(getLower16(Offset));
  //insert the new
- MachineInstr* nMI=BuildMI(Alpha::LDAH, 2, Alpha::R28)
+ MachineInstr* nMI=BuildMI(TII, Alpha::LDAH, 2, Alpha::R28)
  .addImm(getUpper16(Offset)).addReg(FP ? Alpha::R15 : Alpha::R30);
  MBB.insert(II, nMI);
  } else {
  class Type;

  struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
- AlphaRegisterInfo();
+ const TargetInstrInfo &TII;
+
+ AlphaRegisterInfo(const TargetInstrInfo &tii);

  /// Code Generation virtual methods...
  void storeRegToStackSlot(MachineBasicBlock &MBB,
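The Alpha header above shows the pattern the remaining targets follow: the RegisterInfo keeps a TargetInstrInfo reference, and the owning InstrInfo passes itself in as *this (IA64 instead looks TII up per function from the TargetMachine). A sketch of the pattern for a hypothetical target Foo, mirroring the Alpha/PPC/Sparc hunks:

    // FooRegisterInfo.h (hypothetical target)
    struct FooRegisterInfo : public FooGenRegisterInfo {
      const TargetInstrInfo &TII;   // used by the BuildMI calls in frame lowering
      FooRegisterInfo(const TargetInstrInfo &tii);
    };

    // FooInstrInfo.cpp
    FooInstrInfo::FooInstrInfo()
      : TargetInstrInfo(FooInsts, sizeof(FooInsts)/sizeof(FooInsts[0])),
        RI(*this) { }   // hand this InstrInfo to its RegisterInfo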
  void IA64RegisterInfo::
  eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
  MachineBasicBlock::iterator I) const {
-
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  if (hasFP(MF)) {
  // If we have a frame pointer, turn the adjcallstackup instruction into a
  // 'sub SP, ' and the adjcallstackdown instruction into 'add SP,

  MachineInstr *New;
  if (Old->getOpcode() == IA64::ADJUSTCALLSTACKDOWN) {
- New=BuildMI(IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
+ New=BuildMI(TII, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
  .addImm(-Amount);
  } else {
  assert(Old->getOpcode() == IA64::ADJUSTCALLSTACKUP);
- New=BuildMI(IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
+ New=BuildMI(TII, IA64::ADDIMM22, 2, IA64::r12).addReg(IA64::r12)
  .addImm(Amount);
  }

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  bool FP = hasFP(MF);

  // Fix up the old:
  MI.getOperand(i).ChangeToRegister(IA64::r22, false);
  //insert the new
- MachineInstr* nMI=BuildMI(IA64::ADDIMM22, 2, IA64::r22)
+ MachineInstr* nMI=BuildMI(TII, IA64::ADDIMM22, 2, IA64::r22)
  .addReg(BaseRegister).addImm(Offset);
  MBB.insert(II, nMI);
  } else { // it's big
  //fix up the old:
  MI.getOperand(i).ChangeToRegister(IA64::r22, false);
  MachineInstr* nMI;
- nMI=BuildMI(IA64::MOVLIMM64, 1, IA64::r22).addImm(Offset);
+ nMI=BuildMI(TII, IA64::MOVLIMM64, 1, IA64::r22).addImm(Offset);
  MBB.insert(II, nMI);
- nMI=BuildMI(IA64::ADD, 2, IA64::r22).addReg(BaseRegister)
+ nMI=BuildMI(TII, IA64::ADD, 2, IA64::r22).addReg(BaseRegister)
  .addReg(IA64::r22);
  MBB.insert(II, nMI);
  }
  MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  MachineInstr *MI;
  bool FP = hasFP(MF);

  }
  }

- MI=BuildMI(IA64::ALLOC,5).addReg(dstRegOfPseudoAlloc).addImm(0).\
+ MI=BuildMI(TII, IA64::ALLOC,5).addReg(dstRegOfPseudoAlloc).addImm(0). \
  addImm(numStackedGPRsUsed).addImm(numOutRegsUsed).addImm(0);
  MBB.insert(MBBI, MI);


  // adjust stack pointer: r12 -= numbytes
  if (NumBytes <= 8191) {
- MI=BuildMI(IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).addImm(-NumBytes);
+ MI=BuildMI(TII, IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).
+ addImm(-NumBytes);
  MBB.insert(MBBI, MI);
  } else { // we use r22 as a scratch register here
- MI=BuildMI(IA64::MOVLIMM64, 1, IA64::r22).addImm(-NumBytes);
+ MI=BuildMI(TII, IA64::MOVLIMM64, 1, IA64::r22).addImm(-NumBytes);
  // FIXME: MOVLSI32 expects a _u_32imm
  MBB.insert(MBBI, MI); // first load the decrement into r22
- MI=BuildMI(IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
+ MI=BuildMI(TII,IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
  MBB.insert(MBBI, MI); // then add (subtract) it to r12 (stack ptr)
  }

  // now if we need to, save the old FP and set the new
  if (FP) {
- MI = BuildMI(IA64::ST8, 2).addReg(IA64::r12).addReg(IA64::r5);
+ MI = BuildMI(TII, IA64::ST8, 2).addReg(IA64::r12).addReg(IA64::r5);
  MBB.insert(MBBI, MI);
  // this must be the last instr in the prolog ? (XXX: why??)
- MI = BuildMI(IA64::MOV, 1, IA64::r5).addReg(IA64::r12);
+ MI = BuildMI(TII, IA64::MOV, 1, IA64::r5).addReg(IA64::r12);
  MBB.insert(MBBI, MI);
  }

  void IA64RegisterInfo::emitEpilogue(MachineFunction &MF,
  MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  MachineInstr *MI;
  assert(MBBI->getOpcode() == IA64::RET &&
  if (FP)
  {
  //copy the FP into the SP (discards allocas)
- MI=BuildMI(IA64::MOV, 1, IA64::r12).addReg(IA64::r5);
+ MI=BuildMI(TII, IA64::MOV, 1, IA64::r12).addReg(IA64::r5);
  MBB.insert(MBBI, MI);
  //restore the FP
- MI=BuildMI(IA64::LD8, 1, IA64::r5).addReg(IA64::r5);
+ MI=BuildMI(TII, IA64::LD8, 1, IA64::r5).addReg(IA64::r5);
  MBB.insert(MBBI, MI);
  }

  if (NumBytes != 0)
  {
  if (NumBytes <= 8191) {
- MI=BuildMI(IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).addImm(NumBytes);
+ MI=BuildMI(TII, IA64::ADDIMM22,2,IA64::r12).addReg(IA64::r12).
+ addImm(NumBytes);
  MBB.insert(MBBI, MI);
  } else {
- MI=BuildMI(IA64::MOVLIMM64, 1, IA64::r22).addImm(NumBytes);
+ MI=BuildMI(TII, IA64::MOVLIMM64, 1, IA64::r22).addImm(NumBytes);
  MBB.insert(MBBI, MI);
- MI=BuildMI(IA64::ADD, 2, IA64::r12).addReg(IA64::r12).addReg(IA64::r22);
+ MI=BuildMI(TII, IA64::ADD, 2, IA64::r12).addReg(IA64::r12).
+ addReg(IA64::r22);
  MBB.insert(MBBI, MI);
  }
  }

  PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
  : TargetInstrInfo(PPCInsts, sizeof(PPCInsts)/sizeof(PPCInsts[0])), TM(tm),
- RI(*TM.getSubtargetImpl()) {}
+ RI(*TM.getSubtargetImpl(), *this) {}

  /// getPointerRegClass - Return the register class to use to hold pointers.
  /// This is used for addressing modes.
  }
  }

- PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST)
+ PPCRegisterInfo::PPCRegisterInfo(const PPCSubtarget &ST,
+ const TargetInstrInfo &tii)
  : PPCGenRegisterInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
- Subtarget(ST) {
+ Subtarget(ST), TII(tii) {
  ImmToIdxMap[PPC::LD] = PPC::LDX; ImmToIdxMap[PPC::STD] = PPC::STDX;
  ImmToIdxMap[PPC::LBZ] = PPC::LBZX; ImmToIdxMap[PPC::STB] = PPC::STBX;
  ImmToIdxMap[PPC::LHZ] = PPC::LHZX; ImmToIdxMap[PPC::LHA] = PPC::LHAX;
  MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
  if (OpNum == 0) { // move -> store
  unsigned InReg = MI->getOperand(1).getReg();
- return addFrameReference(BuildMI(PPC::STW,
+ return addFrameReference(BuildMI(TII, PPC::STW,
  3).addReg(InReg), FrameIndex);
  } else { // move -> load
  unsigned OutReg = MI->getOperand(0).getReg();
- return addFrameReference(BuildMI(PPC::LWZ, 2, OutReg), FrameIndex);
+ return addFrameReference(BuildMI(TII, PPC::LWZ, 2, OutReg), FrameIndex);
  }
  } else if ((Opc == PPC::OR8 &&
  MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
  if (OpNum == 0) { // move -> store
  unsigned InReg = MI->getOperand(1).getReg();
- return addFrameReference(BuildMI(PPC::STD,
+ return addFrameReference(BuildMI(TII, PPC::STD,
  3).addReg(InReg), FrameIndex);
  } else { // move -> load
  unsigned OutReg = MI->getOperand(0).getReg();
- return addFrameReference(BuildMI(PPC::LD, 2, OutReg), FrameIndex);
+ return addFrameReference(BuildMI(TII, PPC::LD, 2, OutReg), FrameIndex);
  }
  } else if (Opc == PPC::FMRD) {
  if (OpNum == 0) { // move -> store
  unsigned InReg = MI->getOperand(1).getReg();
- return addFrameReference(BuildMI(PPC::STFD,
+ return addFrameReference(BuildMI(TII, PPC::STFD,
  3).addReg(InReg), FrameIndex);
  } else { // move -> load
  unsigned OutReg = MI->getOperand(0).getReg();
- return addFrameReference(BuildMI(PPC::LFD, 2, OutReg), FrameIndex);
+ return addFrameReference(BuildMI(TII, PPC::LFD, 2, OutReg), FrameIndex);
  }
  } else if (Opc == PPC::FMRS) {
  if (OpNum == 0) { // move -> store
  unsigned InReg = MI->getOperand(1).getReg();
- return addFrameReference(BuildMI(PPC::STFS,
+ return addFrameReference(BuildMI(TII, PPC::STFS,
  3).addReg(InReg), FrameIndex);
  } else { // move -> load
  unsigned OutReg = MI->getOperand(0).getReg();
- return addFrameReference(BuildMI(PPC::LFS, 2, OutReg), FrameIndex);
+ return addFrameReference(BuildMI(TII, PPC::LFS, 2, OutReg), FrameIndex);
  }
  }
  return 0;
  class PPCRegisterInfo : public PPCGenRegisterInfo {
  std::map<unsigned, unsigned> ImmToIdxMap;
  const PPCSubtarget &Subtarget;
+ const TargetInstrInfo &TII;
  public:
- PPCRegisterInfo(const PPCSubtarget &SubTarget);
+ PPCRegisterInfo(const PPCSubtarget &SubTarget, const TargetInstrInfo &tii);

  /// getRegisterNumbering - Given the enum value for some register, e.g.
  /// PPC::F14, return the number that it corresponds to (e.g. 14).

  SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
  : TargetInstrInfo(SparcInsts, sizeof(SparcInsts)/sizeof(SparcInsts[0])),
- RI(ST) {
+ RI(ST, *this) {
  }

  static bool isZeroImm(const MachineOperand &op) {
  #include
  using namespace llvm;

- SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st)
+ SparcRegisterInfo::SparcRegisterInfo(SparcSubtarget &st,
+ const TargetInstrInfo &tii)
  : SparcGenRegisterInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP),
- Subtarget(st) {
+ Subtarget(st), TII(tii) {
  }

  void SparcRegisterInfo::
  if (MI->getOperand(1).isRegister() && MI->getOperand(1).getReg() == SP::G0&&
  MI->getOperand(0).isRegister() && MI->getOperand(2).isRegister()) {
  if (OpNum == 0) // COPY -> STORE
- return BuildMI(SP::STri, 3).addFrameIndex(FI).addImm(0)
+ return BuildMI(TII, SP::STri, 3).addFrameIndex(FI).addImm(0)
  .addReg(MI->getOperand(2).getReg());
  else // COPY -> LOAD
- return BuildMI(SP::LDri, 2, MI->getOperand(0).getReg())
+ return BuildMI(TII, SP::LDri, 2, MI->getOperand(0).getReg())
  .addFrameIndex(FI).addImm(0);
  }
  break;
  // FALLTHROUGH
  case SP::FMOVD:
  if (OpNum == 0) // COPY -> STORE
- return BuildMI(isFloat ? SP::STFri : SP::STDFri, 3)
+ return BuildMI(TII, isFloat ? SP::STFri : SP::STDFri, 3)
  .addFrameIndex(FI).addImm(0).addReg(MI->getOperand(1).getReg());
  else // COPY -> LOAD
- return BuildMI(isFloat ? SP::LDFri : SP::LDDFri, 2,
+ return BuildMI(TII, isFloat ? SP::LDFri : SP::LDDFri, 2,
  MI->getOperand(0).getReg()).addFrameIndex(FI).addImm(0);
  break;
  }

  struct SparcRegisterInfo : public SparcGenRegisterInfo {
  SparcSubtarget &Subtarget;
+ const TargetInstrInfo &TII;

- SparcRegisterInfo(SparcSubtarget &st);
+ SparcRegisterInfo(SparcSubtarget &st, const TargetInstrInfo &tii);

  /// Code Generation virtual methods...
  void storeRegToStackSlot(MachineBasicBlock &MBB,
  #include "llvm/Support/Debug.h"
  #include "llvm/Support/Compiler.h"
  #include "llvm/ADT/DepthFirstIterator.h"
+ #include "llvm/ADT/SmallVector.h"
  #include "llvm/ADT/Statistic.h"
  #include "llvm/ADT/STLExtras.h"
  #include

  // Get dead variables list now because the MI pointer may be deleted as part
  // of processing!
- LiveVariables::killed_iterator IB, IE;
- tie(IB, IE) = LV->dead_range(MI);
-
- DEBUG(
- const MRegisterInfo *MRI = MF.getTarget().getRegisterInfo();
- LiveVariables::killed_iterator I = LV->killed_begin(MI);
- LiveVariables::killed_iterator E = LV->killed_end(MI);
- if (I != E) {
- std::cerr << "Killed Operands:";
- for (; I != E; ++I)
- std::cerr << " %" << MRI->getName(*I);
- std::cerr << "\n";
- }
- );
+ SmallVector DeadRegs;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDead())
+ DeadRegs.push_back(MO.getReg());
+ }

  switch (Flags & X86II::FPTypeMask) {
  case X86II::ZeroArgFP: handleZeroArgFP(I); break;

  // Check to see if any of the values defined by this instruction are dead
  // after definition. If so, pop them.
- for (; IB != IE; ++IB) {
- unsigned Reg = *IB;
+ for (unsigned i = 0, e = DeadRegs.size(); i != e; ++i) {
+ unsigned Reg = DeadRegs[i];
  if (Reg >= X86::FP0 && Reg <= X86::FP6) {
  DEBUG(std::cerr << "Register FP#" << Reg-X86::FP0 << " is dead!\n");
  freeStackSlotAfter(I, Reg-X86::FP0);

  unsigned Op0 = getFPReg(MI->getOperand(0));
  unsigned Op1 = getFPReg(MI->getOperand(2));
+ bool KillsOp1 = LV->KillsRegister(MI, X86::FP0+Op1);

  // The first operand *must* be on the top of the stack.
  moveToTop(Op0, I);
  MI->getOperand(0).setReg(getSTReg(Op1));
  MI->setOpcode(getConcreteOpcode(MI->getOpcode()));

-
  // If we kill the second operand, make sure to pop it from the stack.
- if (Op0 != Op1 && LV->KillsRegister(MI, X86::FP0+Op1)) {
+ if (Op0 != Op1 && KillsOp1) {
  // Get this value off of the register stack.
  freeStackSlotAfter(I, Op1);
  }

  // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
  if (ContainsFPCode) {
- BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0).
- addImplicitDefsUses();
+ BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);
  ++NumFPKill;
  }
  }
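The X86 hunks here and below all drop a trailing addImplicitDefsUses() call. That is consistent with the header and scheduler hunks earlier (the addImplicitDefsUses helper and the explicit addImplicitDefUseOperands() call are deleted): once the MachineInstr constructor is handed the TargetInstrInfo, it can append the implicit defs and uses from the instruction descriptor itself. Before/after sketch of the call site just above:

    // Before: implicit operands were requested explicitly on the builder.
    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0).
      addImplicitDefsUses();

    // After: the constructor adds them, so the builder call shrinks to:
    BuildMI(*BB, BB->getFirstTerminator(), X86::FP_REG_KILL, 0);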
  void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
  MachineFrameInfo *MFI) {
  if (Subtarget->isTargetCygwin())
- BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("__main").
- addImplicitDefsUses();
+ BuildMI(BB, X86::CALLpcrel32, 1).addExternalSymbol("__main");

  // Switch the FPU to 64-bit precision mode for better compatibility and speed.
  int CWFrameIdx = MFI->CreateStackObject(2, 2);
  // type of register here.
  GlobalBaseReg = RegMap->createVirtualRegister(X86::GR32RegisterClass);
  BuildMI(FirstMBB, MBBI, X86::MovePCtoStack, 0);
- BuildMI(FirstMBB, MBBI, X86::POP32r, 1, GlobalBaseReg).
- addImplicitDefsUses();
+ BuildMI(FirstMBB, MBBI, X86::POP32r, 1, GlobalBaseReg);
  }
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
  }
  MachineBasicBlock *sinkMBB = new MachineBasicBlock(LLVM_BB);
  unsigned Opc =
  X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
- BuildMI(BB, Opc, 1).addMBB(sinkMBB).addImplicitDefsUses();
+ BuildMI(BB, Opc, 1).addMBB(sinkMBB);
  MachineFunction *F = BB->getParent();
  F->getBasicBlockList().insert(It, copy0MBB);
  F->getBasicBlockList().insert(It, sinkMBB);
  unsigned C = MI->getOperand(2).getReg();
  unsigned M = MI->getOperand(3).getImmedValue();
  if (!Subtarget->hasSSE2() || B != C) return 0;
- return BuildMI(X86::PSHUFDri, 2, A).addReg(B).addImm(M);
+ return BuildMI(*this, X86::PSHUFDri, 2, A).addReg(B).addImm(M);
  }
  }

  case X86::INC32r:
  case X86::INC64_32r:
  assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
- return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src, 1);
+ return addRegOffset(BuildMI(*this, X86::LEA32r, 5, Dest), Src, 1);
  case X86::INC16r:
  case X86::INC64_16r:
  if (DisableLEA16) return 0;
  assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
- return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src, 1);
+ return addRegOffset(BuildMI(*this, X86::LEA16r, 5, Dest), Src, 1);
  case X86::DEC32r:
  case X86::DEC64_32r:
  assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
- return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src, -1);
+ return addRegOffset(BuildMI(*this, X86::LEA32r, 5, Dest), Src, -1);
  case X86::DEC16r:
  case X86::DEC64_16r:
  if (DisableLEA16) return 0;
  assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
- return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src, -1);
+ return addRegOffset(BuildMI(*this, X86::LEA16r, 5, Dest), Src, -1);
  case X86::ADD32rr:
  assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
- return addRegReg(BuildMI(X86::LEA32r, 5, Dest), Src,
+ return addRegReg(BuildMI(*this, X86::LEA32r, 5, Dest), Src,
  MI->getOperand(2).getReg());
  case X86::ADD16rr:
  if (DisableLEA16) return 0;
  assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
- return addRegReg(BuildMI(X86::LEA16r, 5, Dest), Src,
+ return addRegReg(BuildMI(*this, X86::LEA16r, 5, Dest), Src,
  MI->getOperand(2).getReg());
  case X86::ADD32ri:
  case X86::ADD32ri8:
  assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
  if (MI->getOperand(2).isImmediate())
- return addRegOffset(BuildMI(X86::LEA32r, 5, Dest), Src,
+ return addRegOffset(BuildMI(*this, X86::LEA32r, 5, Dest), Src,
  MI->getOperand(2).getImmedValue());
  return 0;
  case X86::ADD16ri:
  if (DisableLEA16) return 0;
  assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
  if (MI->getOperand(2).isImmediate())
- return addRegOffset(BuildMI(X86::LEA16r, 5, Dest), Src,
+ return addRegOffset(BuildMI(*this, X86::LEA16r, 5, Dest), Src,
  MI->getOperand(2).getImmedValue());
  break;

  AM.Scale = 1 << ShAmt;
  AM.IndexReg = Src;
  unsigned Opc = MI->getOpcode() == X86::SHL32ri ? X86::LEA32r :X86::LEA16r;
- return addFullAddress(BuildMI(Opc, 5, Dest), AM);
+ return addFullAddress(BuildMI(*this, Opc, 5, Dest), AM);
  }
  break;
  }
  unsigned A = MI->getOperand(0).getReg();
  unsigned B = MI->getOperand(1).getReg();
  unsigned C = MI->getOperand(2).getReg();
- return BuildMI(Opc, 3, A).addReg(C).addReg(B).addImm(Size-Amt);
+ return BuildMI(*this, Opc, 3, A).addReg(C).addReg(B).addImm(Size-Amt);
  }
  default:
  return TargetInstrInfo::commuteInstruction(MI);
  const TargetInstrInfo &TII) {
  unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
  // Create the base instruction with the memory operand as the first part.
- MachineInstrBuilder MIB = addFrameReference(BuildMI(Opcode, 4+NumOps),
+ MachineInstrBuilder MIB = addFrameReference(BuildMI(TII, Opcode, 4+NumOps),
  FrameIndex);

  // Loop over the rest of the ri operands, converting them over.
  static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
  unsigned FrameIndex, MachineInstr *MI,
  const TargetInstrInfo &TII) {
- MachineInstrBuilder MIB = BuildMI(Opcode, MI->getNumOperands()+3);
+ MachineInstrBuilder MIB = BuildMI(TII, Opcode, MI->getNumOperands()+3);

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
  MachineOperand &MO = MI->getOperand(i);
  return MIB;
  }

- static MachineInstr *MakeM0Inst(unsigned Opcode, unsigned FrameIndex,
+ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII,
+ unsigned Opcode, unsigned FrameIndex,
  MachineInstr *MI) {
- return addFrameReference(BuildMI(Opcode, 5), FrameIndex).addImm(0);
+ return addFrameReference(BuildMI(TII, Opcode, 5), FrameIndex).addImm(0);
  }


  isTwoAddrFold = true;
  } else if (i == 0) { // If operand 0
  if (MI->getOpcode() == X86::MOV16r0)
- return MakeM0Inst(X86::MOV16mi, FrameIndex, MI);
+ return MakeM0Inst(TII, X86::MOV16mi, FrameIndex, MI);
  else if (MI->getOpcode() == X86::MOV32r0)
- return MakeM0Inst(X86::MOV32mi, FrameIndex, MI);
+ return MakeM0Inst(TII, X86::MOV32mi, FrameIndex, MI);
  else if (MI->getOpcode() == X86::MOV64r0)
- return MakeM0Inst(X86::MOV64mi32, FrameIndex, MI);
+ return MakeM0Inst(TII, X86::MOV64mi32, FrameIndex, MI);
  else if (MI->getOpcode() == X86::MOV8r0)
- return MakeM0Inst(X86::MOV8mi, FrameIndex, MI);
+ return MakeM0Inst(TII, X86::MOV8mi, FrameIndex, MI);

  static const TableEntry OpcodeTable[] = {
  { X86::CMP16ri, X86::CMP16mi },

  MachineInstr *New = 0;
  if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
- New=BuildMI(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri, 1, StackPtr)
+ New=BuildMI(TII, Is64Bit ? X86::SUB64ri32 : X86::SUB32ri, 1, StackPtr)
  .addReg(StackPtr).addImm(Amount);
  } else {
  assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
  unsigned Opc = (Amount < 128) ?
  (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
  (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
- New = BuildMI(Opc, 1, StackPtr).addReg(StackPtr).addImm(Amount);
+ New = BuildMI(TII, Opc, 1, StackPtr).addReg(StackPtr).addImm(Amount);
  }
  }

  (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
  (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
  MachineInstr *New =
- BuildMI(Opc, 1, StackPtr).addReg(StackPtr).addImm(CalleeAmt);
+ BuildMI(TII, Opc, 1, StackPtr).addReg(StackPtr).addImm(CalleeAmt);
  MBB.insert(I, New);
  }
  }
  // more than 4k bytes in one go. Touching the stack at 4K increments is
  // necessary to ensure that the guard pages used by the OS virtual memory
  // manager are allocated in correct sequence.
- MI = BuildMI(X86::MOV32ri, 2, X86::EAX).addImm(NumBytes);
+ MI = BuildMI(TII, X86::MOV32ri, 2, X86::EAX).addImm(NumBytes);
  MBB.insert(MBBI, MI);
- MI = BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
+ MI = BuildMI(TII, X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
  MBB.insert(MBBI, MI);
  } else {
  unsigned Opc = (NumBytes < 128) ?
  (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
  (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
- MI= BuildMI(Opc, 1, StackPtr).addReg(StackPtr).addImm(NumBytes);
+ MI= BuildMI(TII, Opc, 1, StackPtr).addReg(StackPtr).addImm(NumBytes);
  MBB.insert(MBBI, MI);
  }
  }

  // Save EBP into the appropriate stack slot...
  // mov [ESP-], EBP
- MI = addRegOffset(BuildMI(Is64Bit ? X86::MOV64mr : X86::MOV32mr, 5),
+ MI = addRegOffset(BuildMI(TII, Is64Bit ? X86::MOV64mr : X86::MOV32mr, 5),
  StackPtr, EBPOffset+NumBytes).addReg(FramePtr);
  MBB.insert(MBBI, MI);

  // Update EBP with the new base value...
  if (NumBytes == SlotSize) // mov EBP, ESP
- MI = BuildMI(Is64Bit ? X86::MOV64rr : X86::MOV32rr, 2, FramePtr).
+ MI = BuildMI(TII, Is64Bit ? X86::MOV64rr : X86::MOV32rr, 2, FramePtr).
  addReg(StackPtr);
  else // lea EBP, [ESP+StackSize]
- MI = addRegOffset(BuildMI(Is64Bit ? X86::LEA64r : X86::LEA32r,
+ MI = addRegOffset(BuildMI(TII, Is64Bit ? X86::LEA64r : X86::LEA32r,
  5, FramePtr), StackPtr, NumBytes-SlotSize);

  MBB.insert(MBBI, MI);
  // If it's main() on Cygwin\Mingw32 we should align stack as well
  if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
  Subtarget->isTargetCygwin()) {
- MI = BuildMI(X86::AND32ri, 2, X86::ESP).addReg(X86::ESP).addImm(-Align);
+ MI= BuildMI(TII, X86::AND32ri, 2, X86::ESP).addReg(X86::ESP).addImm(-Align);
  MBB.insert(MBBI, MI);

  // Probe the stack
- MI = BuildMI(X86::MOV32ri, 2, X86::EAX).addImm(Align);
+ MI = BuildMI(TII, X86::MOV32ri, 2, X86::EAX).addImm(Align);
  MBB.insert(MBBI, MI);
- MI = BuildMI(X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
+ MI = BuildMI(TII, X86::CALLpcrel32, 1).addExternalSymbol("_alloca");
  MBB.insert(MBBI, MI);
  }
  }