llvm.org GIT mirror llvm / f6372aa
Move some more instruction creation methods from RegisterInfo into InstrInfo. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@45484 91177308-0d34-0410-b5e6-96231b3b80d8 Owen Anderson 12 years ago
40 changed file(s) with 1639 addition(s) and 1551 deletion(s). Raw diff Collapse all Expand all
489489 return false;
490490 }
491491
492 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
493 MachineBasicBlock::iterator MI,
494 unsigned SrcReg, bool isKill, int FrameIndex,
495 const TargetRegisterClass *RC) const = 0;
496
497 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
498 SmallVectorImpl &Addr,
499 const TargetRegisterClass *RC,
500 SmallVectorImpl &NewMIs) const = 0;
501
502 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
503 MachineBasicBlock::iterator MI,
504 unsigned DestReg, int FrameIndex,
505 const TargetRegisterClass *RC) const = 0;
506
507 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
508 SmallVectorImpl &Addr,
509 const TargetRegisterClass *RC,
510 SmallVectorImpl &NewMIs) const =0;
511
512492 /// getCrossCopyRegClass - Returns a legal register class to copy a register
513493 /// in the specified class to or from. Returns NULL if it is possible to copy
514494 /// between a two registers of the specified class.
2525 class TargetMachine;
2626 class TargetRegisterClass;
2727 class LiveVariables;
28
29 template class SmallVectorImpl;
2830
2931 //---------------------------------------------------------------------------
3032 // Data types used to define information about a single machine instruction
466468 assert(0 && "Target didn't implement TargetInstrInfo::copyRegToReg!");
467469 }
468470
471 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
472 MachineBasicBlock::iterator MI,
473 unsigned SrcReg, bool isKill, int FrameIndex,
474 const TargetRegisterClass *RC) const {
475 assert(0 && "Target didn't implement TargetInstrInfo::storeRegToStackSlot!");
476 }
477
478 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
479 SmallVectorImpl &Addr,
480 const TargetRegisterClass *RC,
481 SmallVectorImpl &NewMIs) const {
482 assert(0 && "Target didn't implement TargetInstrInfo::storeRegToAddr!");
483 }
484
485 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
486 MachineBasicBlock::iterator MI,
487 unsigned DestReg, int FrameIndex,
488 const TargetRegisterClass *RC) const {
489 assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromStackSlot!");
490 }
491
492 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
493 SmallVectorImpl &Addr,
494 const TargetRegisterClass *RC,
495 SmallVectorImpl &NewMIs) const {
496 assert(0 && "Target didn't implement TargetInstrInfo::loadRegFromAddr!");
497 }
498
469499 /// BlockHasNoFallThrough - Return true if the specified block does not
470500 /// fall-through into its successor block. This is primarily used when a
471501 /// branch is unanalyzable. It is useful for things like unconditional
242242 return;
243243
244244 const MRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo();
245
245 const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
246
246247 // Now that we have a stack slot for each register to be saved, insert spill
247248 // code into the entry block.
248249 MachineBasicBlock *MBB = Fn.begin();
253254 MBB->addLiveIn(CSI[i].getReg());
254255
255256 // Insert the spill to the stack frame.
256 RegInfo->storeRegToStackSlot(*MBB, I, CSI[i].getReg(), true,
257 TII.storeRegToStackSlot(*MBB, I, CSI[i].getReg(), true,
257258 CSI[i].getFrameIdx(), CSI[i].getRegClass());
258259 }
259260 }
260261
261262 // Add code to restore the callee-save registers in each exiting block.
262 const TargetInstrInfo &TII = *Fn.getTarget().getInstrInfo();
263263 for (MachineFunction::iterator FI = Fn.begin(), E = Fn.end(); FI != E; ++FI)
264264 // If last instruction is a return instruction, add an epilogue.
265265 if (!FI->empty() && TII.isReturn(FI->back().getOpcode())) {
281281 // that preceed it.
282282 if (!RegInfo->restoreCalleeSavedRegisters(*MBB, I, CSI)) {
283283 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
284 RegInfo->loadRegFromStackSlot(*MBB, I, CSI[i].getReg(),
284 TII.loadRegFromStackSlot(*MBB, I, CSI[i].getReg(),
285285 CSI[i].getFrameIdx(),
286286 CSI[i].getRegClass());
287287 assert(I != MBB->begin() &&
318318 " the intended one.");
319319 DOUT << " Spilling register " << RegInfo->getName(PhysReg)
320320 << " containing %reg" << VirtReg;
321
322 const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
323
321324 if (!isVirtRegModified(VirtReg))
322325 DOUT << " which has not been modified, so no store necessary!";
323326
328331 const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
329332 int FrameIndex = getStackSpaceFor(VirtReg, RC);
330333 DOUT << " to stack slot #" << FrameIndex;
331 RegInfo->storeRegToStackSlot(MBB, I, PhysReg, true, FrameIndex, RC);
334 TII->storeRegToStackSlot(MBB, I, PhysReg, true, FrameIndex, RC);
332335 ++NumStores; // Update statistics
333336 }
334337
541544 << RegInfo->getName(PhysReg) << "\n";
542545
543546 // Add move instruction(s)
544 RegInfo->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC);
547 const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
548 TII->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC);
545549 ++NumLoads; // Update statistics
546550
547551 MF->getRegInfo().setPhysRegUsed(PhysReg);
275275 " the intended one.");
276276 DOUT << " Spilling register " << MRI->getName(PhysReg)
277277 << " containing %reg" << VirtReg;
278
279 const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
280
278281 if (!isVirtRegModified(VirtReg))
279282 DOUT << " which has not been modified, so no store necessary!";
280283
285288 const TargetRegisterClass *RC = MF->getRegInfo().getRegClass(VirtReg);
286289 int FrameIndex = getStackSpaceFor(VirtReg, RC);
287290 DOUT << " to stack slot #" << FrameIndex;
288 MRI->storeRegToStackSlot(MBB, I, PhysReg, true, FrameIndex, RC);
291 TII->storeRegToStackSlot(MBB, I, PhysReg, true, FrameIndex, RC);
289292 ++NumStores; // Update statistics
290293 }
291294
494497 << MRI->getName(PhysReg) << "\n";
495498
496499 // Add move instruction(s)
497 MRI->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC);
500 const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
501 TII->loadRegFromStackSlot(MBB, MI, PhysReg, FrameIndex, RC);
498502 ++NumLoads; // Update statistics
499503
500504 MF->getRegInfo().setPhysRegUsed(PhysReg);
143143
144144 // Add move instruction(s)
145145 ++NumLoads;
146 MRI->loadRegFromStackSlot(MBB, I, PhysReg, FrameIdx, RC);
146 const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
147 TII->loadRegFromStackSlot(MBB, I, PhysReg, FrameIdx, RC);
147148 return PhysReg;
148149 }
149150
151152 MachineBasicBlock::iterator I,
152153 unsigned VirtReg, unsigned PhysReg) {
153154 const TargetRegisterClass* RC = MF->getRegInfo().getRegClass(VirtReg);
155 const TargetInstrInfo* TII = MBB.getParent()->getTarget().getInstrInfo();
156
154157 int FrameIdx = getStackSpaceFor(VirtReg, RC);
155158
156159 // Add move instruction(s)
157160 ++NumStores;
158 MRI->storeRegToStackSlot(MBB, I, PhysReg, true, FrameIdx, RC);
161 TII->storeRegToStackSlot(MBB, I, PhysReg, true, FrameIdx, RC);
159162 }
160163
161164
7171 if (!ScavengedReg)
7272 return;
7373
74 RegInfo->loadRegFromStackSlot(*MBB, MBBI, ScavengedReg,
74 TII->loadRegFromStackSlot(*MBB, MBBI, ScavengedReg,
7575 ScavengingFrameIndex, ScavengedRC);
7676 MachineBasicBlock::iterator II = prior(MBBI);
7777 RegInfo->eliminateFrameIndex(II, 0, this);
275275
276276 if (ScavengedReg != 0) {
277277 // First restore previously scavenged register.
278 RegInfo->loadRegFromStackSlot(*MBB, I, ScavengedReg,
278 TII->loadRegFromStackSlot(*MBB, I, ScavengedReg,
279279 ScavengingFrameIndex, ScavengedRC);
280280 MachineBasicBlock::iterator II = prior(I);
281281 RegInfo->eliminateFrameIndex(II, SPAdj, this);
282282 }
283283
284 RegInfo->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC);
284 TII->storeRegToStackSlot(*MBB, I, SReg, true, ScavengingFrameIndex, RC);
285285 MachineBasicBlock::iterator II = prior(I);
286286 RegInfo->eliminateFrameIndex(II, SPAdj, this);
287287 ScavengedReg = SReg;
172172 DOUT << "********** REWRITE MACHINE CODE **********\n";
173173 DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
174174 const TargetMachine &TM = MF.getTarget();
175 const MRegisterInfo &MRI = *TM.getRegisterInfo();
175 const TargetInstrInfo &TII = *TM.getInstrInfo();
176
176177
177178 // LoadedRegs - Keep track of which vregs are loaded, so that we only load
178179 // each vreg once (in the case where a spilled vreg is used by multiple
201202 if (MO.isUse() &&
202203 std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
203204 == LoadedRegs.end()) {
204 MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
205 TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
205206 LoadedRegs.push_back(VirtReg);
206207 ++NumLoads;
207208 DOUT << '\t' << *prior(MII);
208209 }
209210
210211 if (MO.isDef()) {
211 MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
212 TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
212213 StackSlot, RC);
213214 ++NumStores;
214215 }
644645 BitVector &RegKills,
645646 std::vector &KillOps,
646647 VirtRegMap &VRM) {
648 const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
649 .getInstrInfo();
650
647651 if (Reuses.empty()) return PhysReg; // This is most often empty.
648652
649653 for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
692696 VRM.getReMaterializedMI(NewOp.VirtReg));
693697 ++NumReMats;
694698 } else {
695 MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
699 TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
696700 NewOp.StackSlotOrReMat, AliasRC);
697701 // Any stores to this stack slot are not dead anymore.
698702 MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
875879 BitVector &RegKills,
876880 std::vector &KillOps,
877881 VirtRegMap &VRM) {
878 MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
882 TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
879883 DOUT << "Store:\t" << *next(MII);
880884
881885 // If there is a dead store to this stack slot, nuke it now.
978982 ++NumReMats;
979983 } else {
980984 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
981 MRI->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
985 TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
982986 RC);
983987 ++NumLoads;
984988 }
10011005 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
10021006 unsigned Phys = VRM.getPhys(VirtReg);
10031007 int StackSlot = VRM.getStackSlot(VirtReg);
1004 MRI->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
1008 TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
10051009 MachineInstr *StoreMI = next(MII);
10061010 DOUT << "Store:\t" << StoreMI;
10071011 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
12171221 ++NumReMats;
12181222 } else {
12191223 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1220 MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
1224 TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
12211225 ++NumLoads;
12221226 }
12231227 // This invalidates PhysReg.
469469 abort();
470470 }
471471
472 static const MachineInstrBuilder &ARMInstrAddOperand(MachineInstrBuilder &MIB,
473 MachineOperand &MO) {
474 if (MO.isRegister())
475 MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
476 else if (MO.isImmediate())
477 MIB = MIB.addImm(MO.getImm());
478 else if (MO.isFrameIndex())
479 MIB = MIB.addFrameIndex(MO.getIndex());
480 else
481 assert(0 && "Unknown operand for ARMInstrAddOperand!");
482
483 return MIB;
484 }
485
486 void ARMInstrInfo::
487 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
488 unsigned SrcReg, bool isKill, int FI,
489 const TargetRegisterClass *RC) const {
490 if (RC == ARM::GPRRegisterClass) {
491 MachineFunction &MF = *MBB.getParent();
492 ARMFunctionInfo *AFI = MF.getInfo();
493 if (AFI->isThumbFunction())
494 BuildMI(MBB, I, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
495 .addFrameIndex(FI).addImm(0);
496 else
497 AddDefaultPred(BuildMI(MBB, I, get(ARM::STR))
498 .addReg(SrcReg, false, false, isKill)
499 .addFrameIndex(FI).addReg(0).addImm(0));
500 } else if (RC == ARM::DPRRegisterClass) {
501 AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTD))
502 .addReg(SrcReg, false, false, isKill)
503 .addFrameIndex(FI).addImm(0));
504 } else {
505 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
506 AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTS))
507 .addReg(SrcReg, false, false, isKill)
508 .addFrameIndex(FI).addImm(0));
509 }
510 }
511
512 void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
513 bool isKill,
514 SmallVectorImpl &Addr,
515 const TargetRegisterClass *RC,
516 SmallVectorImpl &NewMIs) const {
517 unsigned Opc = 0;
518 if (RC == ARM::GPRRegisterClass) {
519 ARMFunctionInfo *AFI = MF.getInfo();
520 if (AFI->isThumbFunction()) {
521 Opc = Addr[0].isFrameIndex() ? ARM::tSpill : ARM::tSTR;
522 MachineInstrBuilder MIB =
523 BuildMI(get(Opc)).addReg(SrcReg, false, false, isKill);
524 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
525 MIB = ARMInstrAddOperand(MIB, Addr[i]);
526 NewMIs.push_back(MIB);
527 return;
528 }
529 Opc = ARM::STR;
530 } else if (RC == ARM::DPRRegisterClass) {
531 Opc = ARM::FSTD;
532 } else {
533 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
534 Opc = ARM::FSTS;
535 }
536
537 MachineInstrBuilder MIB =
538 BuildMI(get(Opc)).addReg(SrcReg, false, false, isKill);
539 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
540 MIB = ARMInstrAddOperand(MIB, Addr[i]);
541 AddDefaultPred(MIB);
542 NewMIs.push_back(MIB);
543 return;
544 }
545
546 void ARMInstrInfo::
547 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
548 unsigned DestReg, int FI,
549 const TargetRegisterClass *RC) const {
550 if (RC == ARM::GPRRegisterClass) {
551 MachineFunction &MF = *MBB.getParent();
552 ARMFunctionInfo *AFI = MF.getInfo();
553 if (AFI->isThumbFunction())
554 BuildMI(MBB, I, get(ARM::tRestore), DestReg)
555 .addFrameIndex(FI).addImm(0);
556 else
557 AddDefaultPred(BuildMI(MBB, I, get(ARM::LDR), DestReg)
558 .addFrameIndex(FI).addReg(0).addImm(0));
559 } else if (RC == ARM::DPRRegisterClass) {
560 AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDD), DestReg)
561 .addFrameIndex(FI).addImm(0));
562 } else {
563 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
564 AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDS), DestReg)
565 .addFrameIndex(FI).addImm(0));
566 }
567 }
568
569 void ARMInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
570 SmallVectorImpl &Addr,
571 const TargetRegisterClass *RC,
572 SmallVectorImpl &NewMIs) const {
573 unsigned Opc = 0;
574 if (RC == ARM::GPRRegisterClass) {
575 ARMFunctionInfo *AFI = MF.getInfo();
576 if (AFI->isThumbFunction()) {
577 Opc = Addr[0].isFrameIndex() ? ARM::tRestore : ARM::tLDR;
578 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
579 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
580 MIB = ARMInstrAddOperand(MIB, Addr[i]);
581 NewMIs.push_back(MIB);
582 return;
583 }
584 Opc = ARM::LDR;
585 } else if (RC == ARM::DPRRegisterClass) {
586 Opc = ARM::FLDD;
587 } else {
588 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
589 Opc = ARM::FLDS;
590 }
591
592 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
593 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
594 MIB = ARMInstrAddOperand(MIB, Addr[i]);
595 AddDefaultPred(MIB);
596 NewMIs.push_back(MIB);
597 return;
598 }
599
472600 bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
473601 if (MBB.empty()) return false;
474602
164164 unsigned DestReg, unsigned SrcReg,
165165 const TargetRegisterClass *DestRC,
166166 const TargetRegisterClass *SrcRC) const;
167 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
168 MachineBasicBlock::iterator MBBI,
169 unsigned SrcReg, bool isKill, int FrameIndex,
170 const TargetRegisterClass *RC) const;
171
172 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
173 SmallVectorImpl &Addr,
174 const TargetRegisterClass *RC,
175 SmallVectorImpl &NewMIs) const;
176
177 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
178 MachineBasicBlock::iterator MBBI,
179 unsigned DestReg, int FrameIndex,
180 const TargetRegisterClass *RC) const;
181
182 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
183 SmallVectorImpl &Addr,
184 const TargetRegisterClass *RC,
185 SmallVectorImpl &NewMIs) const;
167186 virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
168187 virtual bool ReverseBranchCondition(std::vector &Cond) const;
169188
139139 static inline
140140 const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
141141 return MIB.addReg(0);
142 }
143
144 static const MachineInstrBuilder &ARMInstrAddOperand(MachineInstrBuilder &MIB,
145 MachineOperand &MO) {
146 if (MO.isRegister())
147 MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
148 else if (MO.isImmediate())
149 MIB = MIB.addImm(MO.getImm());
150 else if (MO.isFrameIndex())
151 MIB = MIB.addFrameIndex(MO.getIndex());
152 else
153 assert(0 && "Unknown operand for ARMInstrAddOperand!");
154
155 return MIB;
156 }
157
158 void ARMRegisterInfo::
159 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
160 unsigned SrcReg, bool isKill, int FI,
161 const TargetRegisterClass *RC) const {
162 if (RC == ARM::GPRRegisterClass) {
163 MachineFunction &MF = *MBB.getParent();
164 ARMFunctionInfo *AFI = MF.getInfo();
165 if (AFI->isThumbFunction())
166 BuildMI(MBB, I, TII.get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
167 .addFrameIndex(FI).addImm(0);
168 else
169 AddDefaultPred(BuildMI(MBB, I, TII.get(ARM::STR))
170 .addReg(SrcReg, false, false, isKill)
171 .addFrameIndex(FI).addReg(0).addImm(0));
172 } else if (RC == ARM::DPRRegisterClass) {
173 AddDefaultPred(BuildMI(MBB, I, TII.get(ARM::FSTD))
174 .addReg(SrcReg, false, false, isKill)
175 .addFrameIndex(FI).addImm(0));
176 } else {
177 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
178 AddDefaultPred(BuildMI(MBB, I, TII.get(ARM::FSTS))
179 .addReg(SrcReg, false, false, isKill)
180 .addFrameIndex(FI).addImm(0));
181 }
182 }
183
184 void ARMRegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
185 bool isKill,
186 SmallVectorImpl &Addr,
187 const TargetRegisterClass *RC,
188 SmallVectorImpl &NewMIs) const {
189 unsigned Opc = 0;
190 if (RC == ARM::GPRRegisterClass) {
191 ARMFunctionInfo *AFI = MF.getInfo();
192 if (AFI->isThumbFunction()) {
193 Opc = Addr[0].isFrameIndex() ? ARM::tSpill : ARM::tSTR;
194 MachineInstrBuilder MIB =
195 BuildMI(TII.get(Opc)).addReg(SrcReg, false, false, isKill);
196 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
197 MIB = ARMInstrAddOperand(MIB, Addr[i]);
198 NewMIs.push_back(MIB);
199 return;
200 }
201 Opc = ARM::STR;
202 } else if (RC == ARM::DPRRegisterClass) {
203 Opc = ARM::FSTD;
204 } else {
205 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
206 Opc = ARM::FSTS;
207 }
208
209 MachineInstrBuilder MIB =
210 BuildMI(TII.get(Opc)).addReg(SrcReg, false, false, isKill);
211 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
212 MIB = ARMInstrAddOperand(MIB, Addr[i]);
213 AddDefaultPred(MIB);
214 NewMIs.push_back(MIB);
215 return;
216 }
217
218 void ARMRegisterInfo::
219 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
220 unsigned DestReg, int FI,
221 const TargetRegisterClass *RC) const {
222 if (RC == ARM::GPRRegisterClass) {
223 MachineFunction &MF = *MBB.getParent();
224 ARMFunctionInfo *AFI = MF.getInfo();
225 if (AFI->isThumbFunction())
226 BuildMI(MBB, I, TII.get(ARM::tRestore), DestReg)
227 .addFrameIndex(FI).addImm(0);
228 else
229 AddDefaultPred(BuildMI(MBB, I, TII.get(ARM::LDR), DestReg)
230 .addFrameIndex(FI).addReg(0).addImm(0));
231 } else if (RC == ARM::DPRRegisterClass) {
232 AddDefaultPred(BuildMI(MBB, I, TII.get(ARM::FLDD), DestReg)
233 .addFrameIndex(FI).addImm(0));
234 } else {
235 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
236 AddDefaultPred(BuildMI(MBB, I, TII.get(ARM::FLDS), DestReg)
237 .addFrameIndex(FI).addImm(0));
238 }
239 }
240
241 void ARMRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
242 SmallVectorImpl &Addr,
243 const TargetRegisterClass *RC,
244 SmallVectorImpl &NewMIs) const {
245 unsigned Opc = 0;
246 if (RC == ARM::GPRRegisterClass) {
247 ARMFunctionInfo *AFI = MF.getInfo();
248 if (AFI->isThumbFunction()) {
249 Opc = Addr[0].isFrameIndex() ? ARM::tRestore : ARM::tLDR;
250 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
251 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
252 MIB = ARMInstrAddOperand(MIB, Addr[i]);
253 NewMIs.push_back(MIB);
254 return;
255 }
256 Opc = ARM::LDR;
257 } else if (RC == ARM::DPRRegisterClass) {
258 Opc = ARM::FLDD;
259 } else {
260 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
261 Opc = ARM::FLDS;
262 }
263
264 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
265 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
266 MIB = ARMInstrAddOperand(MIB, Addr[i]);
267 AddDefaultPred(MIB);
268 NewMIs.push_back(MIB);
269 return;
270142 }
271143
272144 /// emitLoadConstPool - Emits a load from constpool to materialize the
4343 bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
4444 MachineBasicBlock::iterator MI,
4545 const std::vector &CSI) const;
46
47 void storeRegToStackSlot(MachineBasicBlock &MBB,
48 MachineBasicBlock::iterator MBBI,
49 unsigned SrcReg, bool isKill, int FrameIndex,
50 const TargetRegisterClass *RC) const;
51
52 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
53 SmallVectorImpl &Addr,
54 const TargetRegisterClass *RC,
55 SmallVectorImpl &NewMIs) const;
56
57 void loadRegFromStackSlot(MachineBasicBlock &MBB,
58 MachineBasicBlock::iterator MBBI,
59 unsigned DestReg, int FrameIndex,
60 const TargetRegisterClass *RC) const;
61
62 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
63 SmallVectorImpl &Addr,
64 const TargetRegisterClass *RC,
65 SmallVectorImpl &NewMIs) const;
66
67 void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
68 unsigned DestReg, unsigned SrcReg,
69 const TargetRegisterClass *DestRC,
70 const TargetRegisterClass *SrcRC) const;
7146
7247 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
7348 unsigned DestReg, const MachineInstr *Orig) const;
154154 }
155155 }
156156
157 void
158 AlphaInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
159 MachineBasicBlock::iterator MI,
160 unsigned SrcReg, bool isKill, int FrameIdx,
161 const TargetRegisterClass *RC) const {
162 //cerr << "Trying to store " << getPrettyName(SrcReg) << " to "
163 // << FrameIdx << "\n";
164 //BuildMI(MBB, MI, Alpha::WTF, 0).addReg(SrcReg);
165 if (RC == Alpha::F4RCRegisterClass)
166 BuildMI(MBB, MI, get(Alpha::STS))
167 .addReg(SrcReg, false, false, isKill)
168 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
169 else if (RC == Alpha::F8RCRegisterClass)
170 BuildMI(MBB, MI, get(Alpha::STT))
171 .addReg(SrcReg, false, false, isKill)
172 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
173 else if (RC == Alpha::GPRCRegisterClass)
174 BuildMI(MBB, MI, get(Alpha::STQ))
175 .addReg(SrcReg, false, false, isKill)
176 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
177 else
178 abort();
179 }
180
181 void AlphaInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
182 bool isKill,
183 SmallVectorImpl &Addr,
184 const TargetRegisterClass *RC,
185 SmallVectorImpl &NewMIs) const {
186 unsigned Opc = 0;
187 if (RC == Alpha::F4RCRegisterClass)
188 Opc = Alpha::STS;
189 else if (RC == Alpha::F8RCRegisterClass)
190 Opc = Alpha::STT;
191 else if (RC == Alpha::GPRCRegisterClass)
192 Opc = Alpha::STQ;
193 else
194 abort();
195 MachineInstrBuilder MIB =
196 BuildMI(get(Opc)).addReg(SrcReg, false, false, isKill);
197 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
198 MachineOperand &MO = Addr[i];
199 if (MO.isRegister())
200 MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
201 else
202 MIB.addImm(MO.getImm());
203 }
204 NewMIs.push_back(MIB);
205 }
206
207 void
208 AlphaInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
209 MachineBasicBlock::iterator MI,
210 unsigned DestReg, int FrameIdx,
211 const TargetRegisterClass *RC) const {
212 //cerr << "Trying to load " << getPrettyName(DestReg) << " to "
213 // << FrameIdx << "\n";
214 if (RC == Alpha::F4RCRegisterClass)
215 BuildMI(MBB, MI, get(Alpha::LDS), DestReg)
216 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
217 else if (RC == Alpha::F8RCRegisterClass)
218 BuildMI(MBB, MI, get(Alpha::LDT), DestReg)
219 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
220 else if (RC == Alpha::GPRCRegisterClass)
221 BuildMI(MBB, MI, get(Alpha::LDQ), DestReg)
222 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
223 else
224 abort();
225 }
226
227 void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
228 SmallVectorImpl &Addr,
229 const TargetRegisterClass *RC,
230 SmallVectorImpl &NewMIs) const {
231 unsigned Opc = 0;
232 if (RC == Alpha::F4RCRegisterClass)
233 Opc = Alpha::LDS;
234 else if (RC == Alpha::F8RCRegisterClass)
235 Opc = Alpha::LDT;
236 else if (RC == Alpha::GPRCRegisterClass)
237 Opc = Alpha::LDQ;
238 else
239 abort();
240 MachineInstrBuilder MIB =
241 BuildMI(get(Opc), DestReg);
242 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
243 MachineOperand &MO = Addr[i];
244 if (MO.isRegister())
245 MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
246 else
247 MIB.addImm(MO.getImm());
248 }
249 NewMIs.push_back(MIB);
250 }
251
157252 static unsigned AlphaRevCondCode(unsigned Opcode) {
158253 switch (Opcode) {
159254 case Alpha::BEQ: return Alpha::BNE;
4646 unsigned DestReg, unsigned SrcReg,
4747 const TargetRegisterClass *DestRC,
4848 const TargetRegisterClass *SrcRC) const;
49 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
50 MachineBasicBlock::iterator MBBI,
51 unsigned SrcReg, bool isKill, int FrameIndex,
52 const TargetRegisterClass *RC) const;
53
54 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
55 SmallVectorImpl &Addr,
56 const TargetRegisterClass *RC,
57 SmallVectorImpl &NewMIs) const;
58
59 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
60 MachineBasicBlock::iterator MBBI,
61 unsigned DestReg, int FrameIndex,
62 const TargetRegisterClass *RC) const;
63
64 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
65 SmallVectorImpl &Addr,
66 const TargetRegisterClass *RC,
67 SmallVectorImpl &NewMIs) const;
4968 bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
5069 MachineBasicBlock *&FBB,
5170 std::vector &Cond) const;
5555 : AlphaGenRegisterInfo(Alpha::ADJUSTSTACKDOWN, Alpha::ADJUSTSTACKUP),
5656 TII(tii)
5757 {
58 }
59
60 void
61 AlphaRegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
62 MachineBasicBlock::iterator MI,
63 unsigned SrcReg, bool isKill, int FrameIdx,
64 const TargetRegisterClass *RC) const {
65 //cerr << "Trying to store " << getPrettyName(SrcReg) << " to "
66 // << FrameIdx << "\n";
67 //BuildMI(MBB, MI, Alpha::WTF, 0).addReg(SrcReg);
68 if (RC == Alpha::F4RCRegisterClass)
69 BuildMI(MBB, MI, TII.get(Alpha::STS))
70 .addReg(SrcReg, false, false, isKill)
71 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
72 else if (RC == Alpha::F8RCRegisterClass)
73 BuildMI(MBB, MI, TII.get(Alpha::STT))
74 .addReg(SrcReg, false, false, isKill)
75 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
76 else if (RC == Alpha::GPRCRegisterClass)
77 BuildMI(MBB, MI, TII.get(Alpha::STQ))
78 .addReg(SrcReg, false, false, isKill)
79 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
80 else
81 abort();
82 }
83
84 void AlphaRegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
85 bool isKill,
86 SmallVectorImpl &Addr,
87 const TargetRegisterClass *RC,
88 SmallVectorImpl &NewMIs) const {
89 unsigned Opc = 0;
90 if (RC == Alpha::F4RCRegisterClass)
91 Opc = Alpha::STS;
92 else if (RC == Alpha::F8RCRegisterClass)
93 Opc = Alpha::STT;
94 else if (RC == Alpha::GPRCRegisterClass)
95 Opc = Alpha::STQ;
96 else
97 abort();
98 MachineInstrBuilder MIB =
99 BuildMI(TII.get(Opc)).addReg(SrcReg, false, false, isKill);
100 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
101 MachineOperand &MO = Addr[i];
102 if (MO.isRegister())
103 MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
104 else
105 MIB.addImm(MO.getImm());
106 }
107 NewMIs.push_back(MIB);
108 }
109
110 void
111 AlphaRegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
112 MachineBasicBlock::iterator MI,
113 unsigned DestReg, int FrameIdx,
114 const TargetRegisterClass *RC) const {
115 //cerr << "Trying to load " << getPrettyName(DestReg) << " to "
116 // << FrameIdx << "\n";
117 if (RC == Alpha::F4RCRegisterClass)
118 BuildMI(MBB, MI, TII.get(Alpha::LDS), DestReg)
119 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
120 else if (RC == Alpha::F8RCRegisterClass)
121 BuildMI(MBB, MI, TII.get(Alpha::LDT), DestReg)
122 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
123 else if (RC == Alpha::GPRCRegisterClass)
124 BuildMI(MBB, MI, TII.get(Alpha::LDQ), DestReg)
125 .addFrameIndex(FrameIdx).addReg(Alpha::F31);
126 else
127 abort();
128 }
129
130 void AlphaRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
131 SmallVectorImpl &Addr,
132 const TargetRegisterClass *RC,
133 SmallVectorImpl &NewMIs) const {
134 unsigned Opc = 0;
135 if (RC == Alpha::F4RCRegisterClass)
136 Opc = Alpha::LDS;
137 else if (RC == Alpha::F8RCRegisterClass)
138 Opc = Alpha::LDT;
139 else if (RC == Alpha::GPRCRegisterClass)
140 Opc = Alpha::LDQ;
141 else
142 abort();
143 MachineInstrBuilder MIB =
144 BuildMI(TII.get(Opc), DestReg);
145 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
146 MachineOperand &MO = Addr[i];
147 if (MO.isRegister())
148 MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
149 else
150 MIB.addImm(MO.getImm());
151 }
152 NewMIs.push_back(MIB);
15358 }
15459
15560 MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
2727 AlphaRegisterInfo(const TargetInstrInfo &tii);
2828
2929 /// Code Generation virtual methods...
30 void storeRegToStackSlot(MachineBasicBlock &MBB,
31 MachineBasicBlock::iterator MBBI,
32 unsigned SrcReg, bool isKill, int FrameIndex,
33 const TargetRegisterClass *RC) const;
34
35 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
36 SmallVectorImpl &Addr,
37 const TargetRegisterClass *RC,
38 SmallVectorImpl &NewMIs) const;
39
40 void loadRegFromStackSlot(MachineBasicBlock &MBB,
41 MachineBasicBlock::iterator MBBI,
42 unsigned DestReg, int FrameIndex,
43 const TargetRegisterClass *RC) const;
44
45 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
46 SmallVectorImpl &Addr,
47 const TargetRegisterClass *RC,
48 SmallVectorImpl &NewMIs) const;
49
5030 MachineInstr* foldMemoryOperand(MachineInstr* MI,
5131 SmallVectorImpl &Ops,
5232 int FrameIndex) const;
5636 MachineInstr* LoadMI) const {
5737 return 0;
5838 }
59
60 void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
61 unsigned DestReg, unsigned SrcReg,
62 const TargetRegisterClass *DestRC,
63 const TargetRegisterClass *SrcRC) const;
6439
6540 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
6641 unsigned DestReg, const MachineInstr *Orig) const;
1212
1313 #include "SPURegisterNames.h"
1414 #include "SPUInstrInfo.h"
15 #include "SPUInstrBuilder.h"
1516 #include "SPUTargetMachine.h"
1617 #include "SPUGenInstrInfo.inc"
1718 #include "llvm/CodeGen/MachineInstrBuilder.h"
187188 const TargetRegisterClass *SrcRC) const
188189 {
189190 if (DestRC != SrcRC) {
190 cerr << "SPURegisterInfo::copyRegToReg(): DestRC != SrcRC not supported!\n";
191 cerr << "SPUInstrInfo::copyRegToReg(): DestRC != SrcRC not supported!\n";
191192 abort();
192193 }
193194
216217 abort();
217218 }
218219 }
220
// NOTE(review): this span is an *added* hunk of SPUInstrInfo.cpp rendered as a
// diff page; the leading integers on each line are diff line numbers, not code.
// Template arguments appear stripped by extraction: `SmallVectorImpl &` was
// presumably `SmallVectorImpl<MachineOperand>` / `SmallVectorImpl<MachineInstr*>`
// in the original — TODO confirm against the upstream commit.
//
// storeRegToStackSlot: spill SrcReg to stack slot FrameIdx, choosing a
// d-form (STQD*) opcode when the offset fits maxFrameOffset(), else x-form (STQX*).
221 void
222 SPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
223 MachineBasicBlock::iterator MI,
224 unsigned SrcReg, bool isKill, int FrameIdx,
225 const TargetRegisterClass *RC) const
226 {
227 MachineOpCode opc;
228 if (RC == SPU::GPRCRegisterClass) {
229 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
230 ? SPU::STQDr128
231 : SPU::STQXr128;
232 } else if (RC == SPU::R64CRegisterClass) {
233 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
234 ? SPU::STQDr64
235 : SPU::STQXr64;
236 } else if (RC == SPU::R64FPRegisterClass) {
237 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
238 ? SPU::STQDr64
239 : SPU::STQXr64;
240 } else if (RC == SPU::R32CRegisterClass) {
241 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
242 ? SPU::STQDr32
243 : SPU::STQXr32;
244 } else if (RC == SPU::R32FPRegisterClass) {
245 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
246 ? SPU::STQDr32
247 : SPU::STQXr32;
248 } else if (RC == SPU::R16CRegisterClass) {
249 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset()) ?
250 SPU::STQDr16
251 : SPU::STQXr16;
252 } else {
253 assert(0 && "Unknown regclass!");
254 abort();
255 }
256
257 addFrameReference(BuildMI(MBB, MI, get(opc))
258 .addReg(SrcReg, false, false, isKill), FrameIdx);
259 }
260
// storeRegToAddr: unconditionally aborts (unimplemented stub); the code after
// abort() is dead scaffolding copied from the PPC backend, kept for reference.
261 void SPUInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
262 bool isKill,
263 SmallVectorImpl &Addr,
264 const TargetRegisterClass *RC,
265 SmallVectorImpl &NewMIs) const {
266 cerr << "storeRegToAddr() invoked!\n";
267 abort();
268
269 if (Addr[0].isFrameIndex()) {
270 /* do what storeRegToStackSlot does here */
271 } else {
272 unsigned Opc = 0;
273 if (RC == SPU::GPRCRegisterClass) {
274 /* Opc = PPC::STW; */
275 } else if (RC == SPU::R16CRegisterClass) {
276 /* Opc = PPC::STD; */
277 } else if (RC == SPU::R32CRegisterClass) {
278 /* Opc = PPC::STFD; */
279 } else if (RC == SPU::R32FPRegisterClass) {
280 /* Opc = PPC::STFD; */
281 } else if (RC == SPU::R64FPRegisterClass) {
282 /* Opc = PPC::STFS; */
283 } else if (RC == SPU::VECREGRegisterClass) {
284 /* Opc = PPC::STVX; */
285 } else {
286 assert(0 && "Unknown regclass!");
287 abort();
288 }
289 MachineInstrBuilder MIB = BuildMI(get(Opc))
290 .addReg(SrcReg, false, false, isKill);
291 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
292 MachineOperand &MO = Addr[i];
293 if (MO.isRegister())
294 MIB.addReg(MO.getReg());
295 else if (MO.isImmediate())
296 MIB.addImm(MO.getImm());
297 else
298 MIB.addFrameIndex(MO.getIndex());
299 }
300 NewMIs.push_back(MIB);
301 }
302 }
303
// loadRegFromStackSlot: reload DestReg from FrameIdx; mirrors the store path,
// selecting LQD* (d-form) vs LQX* (x-form) by frame-offset reachability.
304 void
305 SPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
306 MachineBasicBlock::iterator MI,
307 unsigned DestReg, int FrameIdx,
308 const TargetRegisterClass *RC) const
309 {
310 MachineOpCode opc;
311 if (RC == SPU::GPRCRegisterClass) {
312 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
313 ? SPU::LQDr128
314 : SPU::LQXr128;
315 } else if (RC == SPU::R64CRegisterClass) {
316 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
317 ? SPU::LQDr64
318 : SPU::LQXr64;
319 } else if (RC == SPU::R64FPRegisterClass) {
320 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
321 ? SPU::LQDr64
322 : SPU::LQXr64;
323 } else if (RC == SPU::R32CRegisterClass) {
324 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
325 ? SPU::LQDr32
326 : SPU::LQXr32;
327 } else if (RC == SPU::R32FPRegisterClass) {
328 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
329 ? SPU::LQDr32
330 : SPU::LQXr32;
331 } else if (RC == SPU::R16CRegisterClass) {
332 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
333 ? SPU::LQDr16
334 : SPU::LQXr16;
335 } else {
336 assert(0 && "Unknown regclass in loadRegFromStackSlot!");
337 abort();
338 }
339
340 addFrameReference(BuildMI(MBB, MI, get(opc)).addReg(DestReg), FrameIdx);
341 }
342
// loadRegFromAddr: unimplemented stub — aborts immediately (note the log
// message says "loadRegToAddr", a misnomer left from a paste); the remainder
// is dead PPC-derived scaffolding.
343 /*!
344 \note We are really pessimistic here about what kind of a load we're doing.
345 */
346 void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
347 SmallVectorImpl &Addr,
348 const TargetRegisterClass *RC,
349 SmallVectorImpl &NewMIs)
350 const {
351 cerr << "loadRegToAddr() invoked!\n";
352 abort();
353
354 if (Addr[0].isFrameIndex()) {
355 /* do what loadRegFromStackSlot does here... */
356 } else {
357 unsigned Opc = 0;
358 if (RC == SPU::R8CRegisterClass) {
359 /* do brilliance here */
360 } else if (RC == SPU::R16CRegisterClass) {
361 /* Opc = PPC::LWZ; */
362 } else if (RC == SPU::R32CRegisterClass) {
363 /* Opc = PPC::LD; */
364 } else if (RC == SPU::R32FPRegisterClass) {
365 /* Opc = PPC::LFD; */
366 } else if (RC == SPU::R64FPRegisterClass) {
367 /* Opc = PPC::LFS; */
368 } else if (RC == SPU::VECREGRegisterClass) {
369 /* Opc = PPC::LVX; */
370 } else if (RC == SPU::GPRCRegisterClass) {
371 /* Opc = something else! */
372 } else {
373 assert(0 && "Unknown regclass!");
374 abort();
375 }
376 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
377 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
378 MachineOperand &MO = Addr[i];
379 if (MO.isRegister())
380 MIB.addReg(MO.getReg());
381 else if (MO.isImmediate())
382 MIB.addImm(MO.getImm());
383 else
384 MIB.addFrameIndex(MO.getIndex());
385 }
386 NewMIs.push_back(MIB);
387 }
388 }
389
5050 unsigned DestReg, unsigned SrcReg,
5151 const TargetRegisterClass *DestRC,
5252 const TargetRegisterClass *SrcRC) const;
53
54 //! Store a register to a stack slot, based on its register class.
55 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
56 MachineBasicBlock::iterator MBBI,
57 unsigned SrcReg, bool isKill, int FrameIndex,
58 const TargetRegisterClass *RC) const;
59
60 //! Store a register to an address, based on its register class
61 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
62 SmallVectorImpl &Addr,
63 const TargetRegisterClass *RC,
64 SmallVectorImpl &NewMIs) const;
65
66 //! Load a register from a stack slot, based on its register class.
67 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
68 MachineBasicBlock::iterator MBBI,
69 unsigned DestReg, int FrameIndex,
70 const TargetRegisterClass *RC) const;
71
72 //! Loqad a register from an address, based on its register class
73 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
74 SmallVectorImpl &Addr,
75 const TargetRegisterClass *RC,
76 SmallVectorImpl &NewMIs) const;
5377 };
5478 }
5579
190190 {
191191 }
192192
// NOTE(review): this span is the *deleted* hunk of SPURegisterInfo.cpp in the
// same commit — byte-for-byte the old twins of the SPUInstrInfo versions,
// differing only in class name and in using TII.get(...) instead of get(...).
// Leading integers are diff line numbers; `SmallVectorImpl &` presumably lost
// its template argument during extraction — TODO confirm upstream.
//
// storeRegToStackSlot (old location): spill via STQD*/STQX* chosen by whether
// FrameIdx is reachable with a d-form offset.
193 void
194 SPURegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
195 MachineBasicBlock::iterator MI,
196 unsigned SrcReg, bool isKill, int FrameIdx,
197 const TargetRegisterClass *RC) const
198 {
199 MachineOpCode opc;
200 if (RC == SPU::GPRCRegisterClass) {
201 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
202 ? SPU::STQDr128
203 : SPU::STQXr128;
204 } else if (RC == SPU::R64CRegisterClass) {
205 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
206 ? SPU::STQDr64
207 : SPU::STQXr64;
208 } else if (RC == SPU::R64FPRegisterClass) {
209 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
210 ? SPU::STQDr64
211 : SPU::STQXr64;
212 } else if (RC == SPU::R32CRegisterClass) {
213 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
214 ? SPU::STQDr32
215 : SPU::STQXr32;
216 } else if (RC == SPU::R32FPRegisterClass) {
217 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
218 ? SPU::STQDr32
219 : SPU::STQXr32;
220 } else if (RC == SPU::R16CRegisterClass) {
221 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset()) ?
222 SPU::STQDr16
223 : SPU::STQXr16;
224 } else {
225 assert(0 && "Unknown regclass!");
226 abort();
227 }
228
229 addFrameReference(BuildMI(MBB, MI, TII.get(opc))
230 .addReg(SrcReg, false, false, isKill), FrameIdx);
231 }
232
// storeRegToAddr (old location): aborting stub; code after abort() is dead.
233 void SPURegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
234 bool isKill,
235 SmallVectorImpl &Addr,
236 const TargetRegisterClass *RC,
237 SmallVectorImpl &NewMIs) const {
238 cerr << "storeRegToAddr() invoked!\n";
239 abort();
240
241 if (Addr[0].isFrameIndex()) {
242 /* do what storeRegToStackSlot does here */
243 } else {
244 unsigned Opc = 0;
245 if (RC == SPU::GPRCRegisterClass) {
246 /* Opc = PPC::STW; */
247 } else if (RC == SPU::R16CRegisterClass) {
248 /* Opc = PPC::STD; */
249 } else if (RC == SPU::R32CRegisterClass) {
250 /* Opc = PPC::STFD; */
251 } else if (RC == SPU::R32FPRegisterClass) {
252 /* Opc = PPC::STFD; */
253 } else if (RC == SPU::R64FPRegisterClass) {
254 /* Opc = PPC::STFS; */
255 } else if (RC == SPU::VECREGRegisterClass) {
256 /* Opc = PPC::STVX; */
257 } else {
258 assert(0 && "Unknown regclass!");
259 abort();
260 }
261 MachineInstrBuilder MIB = BuildMI(TII.get(Opc))
262 .addReg(SrcReg, false, false, isKill);
263 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
264 MachineOperand &MO = Addr[i];
265 if (MO.isRegister())
266 MIB.addReg(MO.getReg());
267 else if (MO.isImmediate())
268 MIB.addImm(MO.getImm());
269 else
270 MIB.addFrameIndex(MO.getIndex());
271 }
272 NewMIs.push_back(MIB);
273 }
274 }
275
// loadRegFromStackSlot (old location): reload via LQD*/LQX*, mirroring store.
276 void
277 SPURegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
278 MachineBasicBlock::iterator MI,
279 unsigned DestReg, int FrameIdx,
280 const TargetRegisterClass *RC) const
281 {
282 MachineOpCode opc;
283 if (RC == SPU::GPRCRegisterClass) {
284 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
285 ? SPU::LQDr128
286 : SPU::LQXr128;
287 } else if (RC == SPU::R64CRegisterClass) {
288 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
289 ? SPU::LQDr64
290 : SPU::LQXr64;
291 } else if (RC == SPU::R64FPRegisterClass) {
292 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
293 ? SPU::LQDr64
294 : SPU::LQXr64;
295 } else if (RC == SPU::R32CRegisterClass) {
296 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
297 ? SPU::LQDr32
298 : SPU::LQXr32;
299 } else if (RC == SPU::R32FPRegisterClass) {
300 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
301 ? SPU::LQDr32
302 : SPU::LQXr32;
303 } else if (RC == SPU::R16CRegisterClass) {
304 opc = (FrameIdx < SPUFrameInfo::maxFrameOffset())
305 ? SPU::LQDr16
306 : SPU::LQXr16;
307 } else {
308 assert(0 && "Unknown regclass in loadRegFromStackSlot!");
309 abort();
310 }
311
312 addFrameReference(BuildMI(MBB, MI, TII.get(opc)).addReg(DestReg), FrameIdx);
313 }
314
// loadRegFromAddr (old location): aborting stub with dead PPC-derived code.
315 /*!
316 \note We are really pessimistic here about what kind of a load we're doing.
317 */
318 void SPURegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
319 SmallVectorImpl &Addr,
320 const TargetRegisterClass *RC,
321 SmallVectorImpl &NewMIs)
322 const {
323 cerr << "loadRegToAddr() invoked!\n";
324 abort();
325
326 if (Addr[0].isFrameIndex()) {
327 /* do what loadRegFromStackSlot does here... */
328 } else {
329 unsigned Opc = 0;
330 if (RC == SPU::R8CRegisterClass) {
331 /* do brilliance here */
332 } else if (RC == SPU::R16CRegisterClass) {
333 /* Opc = PPC::LWZ; */
334 } else if (RC == SPU::R32CRegisterClass) {
335 /* Opc = PPC::LD; */
336 } else if (RC == SPU::R32FPRegisterClass) {
337 /* Opc = PPC::LFD; */
338 } else if (RC == SPU::R64FPRegisterClass) {
339 /* Opc = PPC::LFS; */
340 } else if (RC == SPU::VECREGRegisterClass) {
341 /* Opc = PPC::LVX; */
342 } else if (RC == SPU::GPRCRegisterClass) {
343 /* Opc = something else! */
344 } else {
345 assert(0 && "Unknown regclass!");
346 abort();
347 }
348 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
349 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
350 MachineOperand &MO = Addr[i];
351 if (MO.isRegister())
352 MIB.addReg(MO.getReg());
353 else if (MO.isImmediate())
354 MIB.addImm(MO.getImm());
355 else
356 MIB.addFrameIndex(MO.getIndex());
357 }
358 NewMIs.push_back(MIB);
359 }
360 }
361
362193 void SPURegisterInfo::reMaterialize(MachineBasicBlock &MBB,
363194 MachineBasicBlock::iterator I,
364195 unsigned DestReg,
3838 e.g. SPU::R14 -> 14.
3939 */
4040 static unsigned getRegisterNumbering(unsigned RegEnum);
41
42 //! Store a register to a stack slot, based on its register class.
43 void storeRegToStackSlot(MachineBasicBlock &MBB,
44 MachineBasicBlock::iterator MBBI,
45 unsigned SrcReg, bool isKill, int FrameIndex,
46 const TargetRegisterClass *RC) const;
47
48 //! Store a register to an address, based on its register class
49 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
50 SmallVectorImpl &Addr,
51 const TargetRegisterClass *RC,
52 SmallVectorImpl &NewMIs) const;
53
54 //! Load a register from a stack slot, based on its register class.
55 void loadRegFromStackSlot(MachineBasicBlock &MBB,
56 MachineBasicBlock::iterator MBBI,
57 unsigned DestReg, int FrameIndex,
58 const TargetRegisterClass *RC) const;
59
60 //! Loqad a register from an address, based on its register class
61 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
62 SmallVectorImpl &Addr,
63 const TargetRegisterClass *RC,
64 SmallVectorImpl &NewMIs) const;
65
66 //! Copy a register to another
67 void copyRegToReg(MachineBasicBlock &MBB,
68 MachineBasicBlock::iterator MI,
69 unsigned DestReg, unsigned SrcReg,
70 const TargetRegisterClass *DestRC,
71 const TargetRegisterClass *SrcRC) const;
7241
7342 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
7443 unsigned DestReg, const MachineInstr *Orig) const;
7373 else // otherwise, MOV works (for both gen. regs and FP regs)
7474 BuildMI(MBB, MI, get(IA64::MOV), DestReg).addReg(SrcReg);
7575 }
76
// NOTE(review): added hunk of IA64InstrInfo.cpp in a diff rendering; leading
// integers are diff line numbers. `SmallVectorImpl &` presumably lost its
// template argument (<MachineOperand> / <MachineInstr*>) during extraction.
//
// storeRegToStackSlot: spill to a frame slot. Predicate registers have no
// direct store, so the value is materialized in scratch reg r2 (0, then
// conditionally +1 under SrcReg) and stored as an 8-byte integer.
77 void IA64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
78 MachineBasicBlock::iterator MI,
79 unsigned SrcReg, bool isKill,
80 int FrameIdx,
81 const TargetRegisterClass *RC) const{
82
83 if (RC == IA64::FPRegisterClass) {
84 BuildMI(MBB, MI, get(IA64::STF_SPILL)).addFrameIndex(FrameIdx)
85 .addReg(SrcReg, false, false, isKill);
86 } else if (RC == IA64::GRRegisterClass) {
87 BuildMI(MBB, MI, get(IA64::ST8)).addFrameIndex(FrameIdx)
88 .addReg(SrcReg, false, false, isKill);
89 } else if (RC == IA64::PRRegisterClass) {
90 /* we use IA64::r2 as a temporary register for doing this hackery. */
91 // first we load 0:
92 BuildMI(MBB, MI, get(IA64::MOV), IA64::r2).addReg(IA64::r0);
93 // then conditionally add 1:
94 BuildMI(MBB, MI, get(IA64::CADDIMM22), IA64::r2).addReg(IA64::r2)
95 .addImm(1).addReg(SrcReg, false, false, isKill);
96 // and then store it to the stack
97 BuildMI(MBB, MI, get(IA64::ST8)).addFrameIndex(FrameIdx).addReg(IA64::r2);
98 } else assert(0 &&
99 "sorry, I don't know how to store this sort of reg in the stack\n");
100 }
101
// storeRegToAddr: build a store through an arbitrary address (Addr operands
// appended verbatim); the source register is added last. Note the predicate
// path stores SrcReg directly with ST1, without the r2 materialization above —
// looks inconsistent with storeRegToStackSlot; flagged for review.
102 void IA64InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
103 bool isKill,
104 SmallVectorImpl &Addr,
105 const TargetRegisterClass *RC,
106 SmallVectorImpl &NewMIs) const {
107 unsigned Opc = 0;
108 if (RC == IA64::FPRegisterClass) {
109 Opc = IA64::STF8;
110 } else if (RC == IA64::GRRegisterClass) {
111 Opc = IA64::ST8;
112 } else if (RC == IA64::PRRegisterClass) {
113 Opc = IA64::ST1;
114 } else {
115 assert(0 &&
116 "sorry, I don't know how to store this sort of reg\n");
117 }
118
119 MachineInstrBuilder MIB = BuildMI(get(Opc));
120 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
121 MachineOperand &MO = Addr[i];
122 if (MO.isRegister())
123 MIB.addReg(MO.getReg());
124 else if (MO.isImmediate())
125 MIB.addImm(MO.getImm());
126 else
127 MIB.addFrameIndex(MO.getIndex());
128 }
129 MIB.addReg(SrcReg, false, false, isKill);
130 NewMIs.push_back(MIB);
131 return;
132
133 }
134
// loadRegFromStackSlot: reload from a frame slot; predicate registers are
// reconstructed by loading the spilled byte into r2 and comparing != r0.
135 void IA64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
136 MachineBasicBlock::iterator MI,
137 unsigned DestReg, int FrameIdx,
138 const TargetRegisterClass *RC)const{
139
140 if (RC == IA64::FPRegisterClass) {
141 BuildMI(MBB, MI, get(IA64::LDF_FILL), DestReg).addFrameIndex(FrameIdx);
142 } else if (RC == IA64::GRRegisterClass) {
143 BuildMI(MBB, MI, get(IA64::LD8), DestReg).addFrameIndex(FrameIdx);
144 } else if (RC == IA64::PRRegisterClass) {
145 // first we load a byte from the stack into r2, our 'predicate hackery'
146 // scratch reg
147 BuildMI(MBB, MI, get(IA64::LD8), IA64::r2).addFrameIndex(FrameIdx);
148 // then we compare it to zero. If it _is_ zero, compare-not-equal to
149 // r0 gives us 0, which is what we want, so that's nice.
150 BuildMI(MBB, MI, get(IA64::CMPNE), DestReg).addReg(IA64::r2).addReg(IA64::r0);
151 } else assert(0 &&
152 "sorry, I don't know how to load this sort of reg from the stack\n");
153 }
154
// loadRegFromAddr: load through an arbitrary address; assert message says
// "store" — copy-paste slip, flagged for review.
155 void IA64InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
156 SmallVectorImpl &Addr,
157 const TargetRegisterClass *RC,
158 SmallVectorImpl &NewMIs) const {
159 unsigned Opc = 0;
160 if (RC == IA64::FPRegisterClass) {
161 Opc = IA64::LDF8;
162 } else if (RC == IA64::GRRegisterClass) {
163 Opc = IA64::LD8;
164 } else if (RC == IA64::PRRegisterClass) {
165 Opc = IA64::LD1;
166 } else {
167 assert(0 &&
168 "sorry, I don't know how to store this sort of reg\n");
169 }
170
171 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
172 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
173 MachineOperand &MO = Addr[i];
174 if (MO.isRegister())
175 MIB.addReg(MO.getReg());
176 else if (MO.isImmediate())
177 MIB.addImm(MO.getImm());
178 else
179 MIB.addFrameIndex(MO.getIndex());
180 }
181 NewMIs.push_back(MIB);
182 return;
183 }
4444 unsigned DestReg, unsigned SrcReg,
4545 const TargetRegisterClass *DestRC,
4646 const TargetRegisterClass *SrcRC) const;
47 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
48 MachineBasicBlock::iterator MI,
49 unsigned SrcReg, bool isKill, int FrameIndex,
50 const TargetRegisterClass *RC) const;
51
52 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
53 SmallVectorImpl &Addr,
54 const TargetRegisterClass *RC,
55 SmallVectorImpl &NewMIs) const;
56
57 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
58 MachineBasicBlock::iterator MI,
59 unsigned DestReg, int FrameIndex,
60 const TargetRegisterClass *RC) const;
61
62 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
63 SmallVectorImpl &Addr,
64 const TargetRegisterClass *RC,
65 SmallVectorImpl &NewMIs) const;
4766 };
4867
4968 } // End llvm namespace
3434 IA64RegisterInfo::IA64RegisterInfo(const TargetInstrInfo &tii)
3535 : IA64GenRegisterInfo(IA64::ADJUSTCALLSTACKDOWN, IA64::ADJUSTCALLSTACKUP),
3636 TII(tii) {}
37
// NOTE(review): *deleted* hunk of IA64RegisterInfo.cpp — the pre-move twins of
// the IA64InstrInfo methods, identical except for class name and TII.get(...)
// vs get(...). Leading integers are diff line numbers; SmallVectorImpl's
// template argument appears stripped by extraction.
//
// storeRegToStackSlot (old location): FP via STF_SPILL, GR via ST8, predicate
// regs materialized in scratch r2 then stored as an 8-byte integer.
38 void IA64RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
39 MachineBasicBlock::iterator MI,
40 unsigned SrcReg, bool isKill,
41 int FrameIdx,
42 const TargetRegisterClass *RC) const{
43
44 if (RC == IA64::FPRegisterClass) {
45 BuildMI(MBB, MI, TII.get(IA64::STF_SPILL)).addFrameIndex(FrameIdx)
46 .addReg(SrcReg, false, false, isKill);
47 } else if (RC == IA64::GRRegisterClass) {
48 BuildMI(MBB, MI, TII.get(IA64::ST8)).addFrameIndex(FrameIdx)
49 .addReg(SrcReg, false, false, isKill);
50 } else if (RC == IA64::PRRegisterClass) {
51 /* we use IA64::r2 as a temporary register for doing this hackery. */
52 // first we load 0:
53 BuildMI(MBB, MI, TII.get(IA64::MOV), IA64::r2).addReg(IA64::r0);
54 // then conditionally add 1:
55 BuildMI(MBB, MI, TII.get(IA64::CADDIMM22), IA64::r2).addReg(IA64::r2)
56 .addImm(1).addReg(SrcReg, false, false, isKill);
57 // and then store it to the stack
58 BuildMI(MBB, MI, TII.get(IA64::ST8)).addFrameIndex(FrameIdx).addReg(IA64::r2);
59 } else assert(0 &&
60 "sorry, I don't know how to store this sort of reg in the stack\n");
61 }
62
// storeRegToAddr (old location): store through an operand list; SrcReg last.
63 void IA64RegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
64 bool isKill,
65 SmallVectorImpl &Addr,
66 const TargetRegisterClass *RC,
67 SmallVectorImpl &NewMIs) const {
68 unsigned Opc = 0;
69 if (RC == IA64::FPRegisterClass) {
70 Opc = IA64::STF8;
71 } else if (RC == IA64::GRRegisterClass) {
72 Opc = IA64::ST8;
73 } else if (RC == IA64::PRRegisterClass) {
74 Opc = IA64::ST1;
75 } else {
76 assert(0 &&
77 "sorry, I don't know how to store this sort of reg\n");
78 }
79
80 MachineInstrBuilder MIB = BuildMI(TII.get(Opc));
81 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
82 MachineOperand &MO = Addr[i];
83 if (MO.isRegister())
84 MIB.addReg(MO.getReg());
85 else if (MO.isImmediate())
86 MIB.addImm(MO.getImm());
87 else
88 MIB.addFrameIndex(MO.getIndex());
89 }
90 MIB.addReg(SrcReg, false, false, isKill);
91 NewMIs.push_back(MIB);
92 return;
93
94 }
95
// loadRegFromStackSlot (old location): predicate regs rebuilt via LD8 into r2
// then CMPNE against r0.
96 void IA64RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
97 MachineBasicBlock::iterator MI,
98 unsigned DestReg, int FrameIdx,
99 const TargetRegisterClass *RC)const{
100
101 if (RC == IA64::FPRegisterClass) {
102 BuildMI(MBB, MI, TII.get(IA64::LDF_FILL), DestReg).addFrameIndex(FrameIdx);
103 } else if (RC == IA64::GRRegisterClass) {
104 BuildMI(MBB, MI, TII.get(IA64::LD8), DestReg).addFrameIndex(FrameIdx);
105 } else if (RC == IA64::PRRegisterClass) {
106 // first we load a byte from the stack into r2, our 'predicate hackery'
107 // scratch reg
108 BuildMI(MBB, MI, TII.get(IA64::LD8), IA64::r2).addFrameIndex(FrameIdx);
109 // then we compare it to zero. If it _is_ zero, compare-not-equal to
110 // r0 gives us 0, which is what we want, so that's nice.
111 BuildMI(MBB, MI, TII.get(IA64::CMPNE), DestReg).addReg(IA64::r2).addReg(IA64::r0);
112 } else assert(0 &&
113 "sorry, I don't know how to load this sort of reg from the stack\n");
114 }
115
// loadRegFromAddr (old location): assert message says "store" — copy-paste slip.
116 void IA64RegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
117 SmallVectorImpl &Addr,
118 const TargetRegisterClass *RC,
119 SmallVectorImpl &NewMIs) const {
120 unsigned Opc = 0;
121 if (RC == IA64::FPRegisterClass) {
122 Opc = IA64::LDF8;
123 } else if (RC == IA64::GRRegisterClass) {
124 Opc = IA64::LD8;
125 } else if (RC == IA64::PRRegisterClass) {
126 Opc = IA64::LD1;
127 } else {
128 assert(0 &&
129 "sorry, I don't know how to store this sort of reg\n");
130 }
131
132 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
133 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
134 MachineOperand &MO = Addr[i];
135 if (MO.isRegister())
136 MIB.addReg(MO.getReg());
137 else if (MO.isImmediate())
138 MIB.addImm(MO.getImm());
139 else
140 MIB.addFrameIndex(MO.getIndex());
141 }
142 NewMIs.push_back(MIB);
143 return;
144 }
14537
14638 void IA64RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
14739 MachineBasicBlock::iterator I,
2828 IA64RegisterInfo(const TargetInstrInfo &tii);
2929
3030 /// Code Generation virtual methods...
31 void storeRegToStackSlot(MachineBasicBlock &MBB,
32 MachineBasicBlock::iterator MI,
33 unsigned SrcReg, bool isKill, int FrameIndex,
34 const TargetRegisterClass *RC) const;
35
36 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
37 SmallVectorImpl &Addr,
38 const TargetRegisterClass *RC,
39 SmallVectorImpl &NewMIs) const;
40
41 void loadRegFromStackSlot(MachineBasicBlock &MBB,
42 MachineBasicBlock::iterator MI,
43 unsigned DestReg, int FrameIndex,
44 const TargetRegisterClass *RC) const;
45
46 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
47 SmallVectorImpl &Addr,
48 const TargetRegisterClass *RC,
49 SmallVectorImpl &NewMIs) const;
50
5131 void copyRegToReg(MachineBasicBlock &MBB,
5232 MachineBasicBlock::iterator MI,
5333 unsigned DestReg, unsigned SrcReg,
305305 assert (0 && "Can't copy this register");
306306 }
307307
// NOTE(review): added hunk of MipsInstrInfo.cpp in a diff rendering; leading
// integers are diff line numbers; SmallVectorImpl's template argument appears
// stripped by extraction.
//
// storeRegToStackSlot: only CPURegs supported — SW SrcReg, 0(frame-slot).
308 void MipsInstrInfo::
309 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
310 unsigned SrcReg, bool isKill, int FI,
311 const TargetRegisterClass *RC) const
312 {
313 if (RC == Mips::CPURegsRegisterClass)
314 BuildMI(MBB, I, get(Mips::SW)).addReg(SrcReg, false, false, isKill)
315 .addImm(0).addFrameIndex(FI);
316 else
317 assert(0 && "Can't store this register to stack slot");
318 }
319
// storeRegToAddr: SW through an arbitrary operand list. Note the regclass
// guard is only an assert — in a release (NDEBUG) build a wrong RC would fall
// through and still emit SW; flagged for review.
320 void MipsInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
321 bool isKill,
322 SmallVectorImpl &Addr,
323 const TargetRegisterClass *RC,
324 SmallVectorImpl &NewMIs) const {
325 if (RC != Mips::CPURegsRegisterClass)
326 assert(0 && "Can't store this register");
327 MachineInstrBuilder MIB = BuildMI(get(Mips::SW))
328 .addReg(SrcReg, false, false, isKill);
329 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
330 MachineOperand &MO = Addr[i];
331 if (MO.isRegister())
332 MIB.addReg(MO.getReg());
333 else if (MO.isImmediate())
334 MIB.addImm(MO.getImm());
335 else
336 MIB.addFrameIndex(MO.getIndex());
337 }
338 NewMIs.push_back(MIB);
339 return;
340 }
341
// loadRegFromStackSlot: LW DestReg, 0(frame-slot); CPURegs only.
342 void MipsInstrInfo::
343 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
344 unsigned DestReg, int FI,
345 const TargetRegisterClass *RC) const
346 {
347 if (RC == Mips::CPURegsRegisterClass)
348 BuildMI(MBB, I, get(Mips::LW), DestReg).addImm(0).addFrameIndex(FI);
349 else
350 assert(0 && "Can't load this register from stack slot");
351 }
352
// loadRegFromAddr: LW through an arbitrary operand list; same assert-only
// regclass guard as storeRegToAddr.
353 void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
354 SmallVectorImpl &Addr,
355 const TargetRegisterClass *RC,
356 SmallVectorImpl &NewMIs) const {
357 if (RC != Mips::CPURegsRegisterClass)
358 assert(0 && "Can't load this register");
359 MachineInstrBuilder MIB = BuildMI(get(Mips::LW), DestReg);
360 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
361 MachineOperand &MO = Addr[i];
362 if (MO.isRegister())
363 MIB.addReg(MO.getReg());
364 else if (MO.isImmediate())
365 MIB.addImm(MO.getImm());
366 else
367 MIB.addFrameIndex(MO.getIndex());
368 }
369 NewMIs.push_back(MIB);
370 return;
371 }
372
372
308373 unsigned MipsInstrInfo::
309374 RemoveBranch(MachineBasicBlock &MBB) const
310375 {
8585 unsigned DestReg, unsigned SrcReg,
8686 const TargetRegisterClass *DestRC,
8787 const TargetRegisterClass *SrcRC) const;
88 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
89 MachineBasicBlock::iterator MBBI,
90 unsigned SrcReg, bool isKill, int FrameIndex,
91 const TargetRegisterClass *RC) const;
92
93 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
94 SmallVectorImpl &Addr,
95 const TargetRegisterClass *RC,
96 SmallVectorImpl &NewMIs) const;
97
98 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
99 MachineBasicBlock::iterator MBBI,
100 unsigned DestReg, int FrameIndex,
101 const TargetRegisterClass *RC) const;
102
103 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
104 SmallVectorImpl &Addr,
105 const TargetRegisterClass *RC,
106 SmallVectorImpl &NewMIs) const;
88107 virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
89108 virtual bool ReverseBranchCondition(std::vector &Cond) const;
90109
8282 }
8383 }
8484
// NOTE(review): *deleted* hunk of MipsRegisterInfo.cpp — the pre-move twins of
// the MipsInstrInfo methods; identical except for class name and TII.get(...)
// vs get(...). Leading integers are diff line numbers.
//
// storeRegToStackSlot (old location): SW for CPURegs only.
85 void MipsRegisterInfo::
86 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
87 unsigned SrcReg, bool isKill, int FI,
88 const TargetRegisterClass *RC) const
89 {
90 if (RC == Mips::CPURegsRegisterClass)
91 BuildMI(MBB, I, TII.get(Mips::SW)).addReg(SrcReg, false, false, isKill)
92 .addImm(0).addFrameIndex(FI);
93 else
94 assert(0 && "Can't store this register to stack slot");
95 }
96
// storeRegToAddr (old location): assert-only regclass guard, then SW through
// the supplied address operands.
97 void MipsRegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
98 bool isKill,
99 SmallVectorImpl &Addr,
100 const TargetRegisterClass *RC,
101 SmallVectorImpl &NewMIs) const {
102 if (RC != Mips::CPURegsRegisterClass)
103 assert(0 && "Can't store this register");
104 MachineInstrBuilder MIB = BuildMI(TII.get(Mips::SW))
105 .addReg(SrcReg, false, false, isKill);
106 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
107 MachineOperand &MO = Addr[i];
108 if (MO.isRegister())
109 MIB.addReg(MO.getReg());
110 else if (MO.isImmediate())
111 MIB.addImm(MO.getImm());
112 else
113 MIB.addFrameIndex(MO.getIndex());
114 }
115 NewMIs.push_back(MIB);
116 return;
117 }
118
// loadRegFromStackSlot (old location): LW for CPURegs only.
119 void MipsRegisterInfo::
120 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
121 unsigned DestReg, int FI,
122 const TargetRegisterClass *RC) const
123 {
124 if (RC == Mips::CPURegsRegisterClass)
125 BuildMI(MBB, I, TII.get(Mips::LW), DestReg).addImm(0).addFrameIndex(FI);
126 else
127 assert(0 && "Can't load this register from stack slot");
128 }
129
// loadRegFromAddr (old location): LW through the supplied address operands.
130 void MipsRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
131 SmallVectorImpl &Addr,
132 const TargetRegisterClass *RC,
133 SmallVectorImpl &NewMIs) const {
134 if (RC != Mips::CPURegsRegisterClass)
135 assert(0 && "Can't load this register");
136 MachineInstrBuilder MIB = BuildMI(TII.get(Mips::LW), DestReg);
137 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
138 MachineOperand &MO = Addr[i];
139 if (MO.isRegister())
140 MIB.addReg(MO.getReg());
141 else if (MO.isImmediate())
142 MIB.addImm(MO.getImm());
143 else
144 MIB.addFrameIndex(MO.getIndex());
145 }
146 NewMIs.push_back(MIB);
147 return;
148 }
149
149
15085 void MipsRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
15186 MachineBasicBlock::iterator I,
15287 unsigned DestReg,
3131 static unsigned getRegisterNumbering(unsigned RegEnum);
3232
3333 /// Code Generation virtual methods...
34 void storeRegToStackSlot(MachineBasicBlock &MBB,
35 MachineBasicBlock::iterator MBBI,
36 unsigned SrcReg, bool isKill, int FrameIndex,
37 const TargetRegisterClass *RC) const;
38
39 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
40 SmallVectorImpl &Addr,
41 const TargetRegisterClass *RC,
42 SmallVectorImpl &NewMIs) const;
43
44 void loadRegFromStackSlot(MachineBasicBlock &MBB,
45 MachineBasicBlock::iterator MBBI,
46 unsigned DestReg, int FrameIndex,
47 const TargetRegisterClass *RC) const;
48
49 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
50 SmallVectorImpl &Addr,
51 const TargetRegisterClass *RC,
52 SmallVectorImpl &NewMIs) const;
53
5434 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
5535 unsigned DestReg, const MachineInstr *Orig) const;
5636
6343 MachineInstr* LoadMI) const {
6444 return 0;
6545 }
66
67 void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
68 unsigned DestReg, unsigned SrcReg,
69 const TargetRegisterClass *DestRC,
70 const TargetRegisterClass *SrcRC) const;
71
7246
7347 const unsigned *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
7448
1111 //===----------------------------------------------------------------------===//
1212
1313 #include "PPCInstrInfo.h"
14 #include "PPCInstrBuilder.h"
1415 #include "PPCPredicates.h"
1516 #include "PPCGenInstrInfo.inc"
1617 #include "PPCTargetMachine.h"
304305 }
305306 }
306307
// NOTE(review): added hunk of PPCInstrInfo.cpp in a diff rendering; leading
// integers are diff line numbers; SmallVectorImpl's template argument appears
// stripped by extraction.
//
// StoreRegToStackSlot (file-local helper): append to NewMIs the instruction
// sequence that spills SrcReg of class RC into FrameIdx. Special cases:
// LR/LR8 are staged through R11/X11 (MFLR first), CR is staged through R0
// (MFCR, optionally RLWINM to rotate the 4 CR-field bits into CR0's slot),
// and vector stores use ADDI into R0 because STVX has no frame-index form.
308 static void StoreRegToStackSlot(const TargetInstrInfo &TII,
309 unsigned SrcReg, bool isKill, int FrameIdx,
310 const TargetRegisterClass *RC,
311 SmallVectorImpl &NewMIs) {
312 if (RC == PPC::GPRCRegisterClass) {
313 if (SrcReg != PPC::LR) {
314 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STW))
315 .addReg(SrcReg, false, false, isKill), FrameIdx));
316 } else {
317 // FIXME: this spills LR immediately to memory in one step. To do this,
318 // we use R11, which we know cannot be used in the prolog/epilog. This is
319 // a hack.
320 NewMIs.push_back(BuildMI(TII.get(PPC::MFLR), PPC::R11));
321 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STW))
322 .addReg(PPC::R11, false, false, isKill), FrameIdx));
323 }
324 } else if (RC == PPC::G8RCRegisterClass) {
325 if (SrcReg != PPC::LR8) {
326 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STD))
327 .addReg(SrcReg, false, false, isKill), FrameIdx));
328 } else {
329 // FIXME: this spills LR immediately to memory in one step. To do this,
330 // we use R11, which we know cannot be used in the prolog/epilog. This is
331 // a hack.
332 NewMIs.push_back(BuildMI(TII.get(PPC::MFLR8), PPC::X11));
333 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STD))
334 .addReg(PPC::X11, false, false, isKill), FrameIdx));
335 }
336 } else if (RC == PPC::F8RCRegisterClass) {
337 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STFD))
338 .addReg(SrcReg, false, false, isKill), FrameIdx));
339 } else if (RC == PPC::F4RCRegisterClass) {
340 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STFS))
341 .addReg(SrcReg, false, false, isKill), FrameIdx));
342 } else if (RC == PPC::CRRCRegisterClass) {
343 // FIXME: We use R0 here, because it isn't available for RA.
344 // We need to store the CR in the low 4-bits of the saved value. First,
345 // issue a MFCR to save all of the CRBits.
346 NewMIs.push_back(BuildMI(TII.get(PPC::MFCR), PPC::R0));
347
348 // If the saved register wasn't CR0, shift the bits left so that they are in
349 // CR0's slot.
350 if (SrcReg != PPC::CR0) {
351 unsigned ShiftBits = PPCRegisterInfo::getRegisterNumbering(SrcReg)*4;
352 // rlwinm r0, r0, ShiftBits, 0, 31.
353 NewMIs.push_back(BuildMI(TII.get(PPC::RLWINM), PPC::R0)
354 .addReg(PPC::R0).addImm(ShiftBits).addImm(0).addImm(31));
355 }
356
357 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STW))
358 .addReg(PPC::R0, false, false, isKill), FrameIdx));
359 } else if (RC == PPC::VRRCRegisterClass) {
360 // We don't have indexed addressing for vector loads. Emit:
361 // R0 = ADDI FI#
362 // STVX VAL, 0, R0
363 //
364 // FIXME: We use R0 here, because it isn't available for RA.
365 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::ADDI), PPC::R0),
366 FrameIdx, 0, 0));
367 NewMIs.push_back(BuildMI(TII.get(PPC::STVX))
368 .addReg(SrcReg, false, false, isKill).addReg(PPC::R0).addReg(PPC::R0));
369 } else {
370 assert(0 && "Unknown regclass!");
371 abort();
372 }
373 }
374
// storeRegToStackSlot (public entry): build the spill sequence via the helper
// above into a local list, then insert the instructions before MI in MBB.
375 void
376 PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
377 MachineBasicBlock::iterator MI,
378 unsigned SrcReg, bool isKill, int FrameIdx,
379 const TargetRegisterClass *RC) const {
380 SmallVector NewMIs;
381 StoreRegToStackSlot(*this, SrcReg, isKill, FrameIdx, RC, NewMIs);
382 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
383 MBB.insert(MI, NewMIs[i]);
384 }
385
385
386 void PPCInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
387 bool isKill,
388 SmallVectorImpl &Addr,
389 const TargetRegisterClass *RC,
390 SmallVectorImpl &NewMIs) const {
391 if (Addr[0].isFrameIndex()) {
392 StoreRegToStackSlot(*this, SrcReg, isKill, Addr[0].getIndex(), RC, NewMIs);
393 return;
394 }
395
396 unsigned Opc = 0;
397 if (RC == PPC::GPRCRegisterClass) {
398 Opc = PPC::STW;
399 } else if (RC == PPC::G8RCRegisterClass) {
400 Opc = PPC::STD;
401 } else if (RC == PPC::F8RCRegisterClass) {
402 Opc = PPC::STFD;
403 } else if (RC == PPC::F4RCRegisterClass) {
404 Opc = PPC::STFS;
405 } else if (RC == PPC::VRRCRegisterClass) {
406 Opc = PPC::STVX;
407 } else {
408 assert(0 && "Unknown regclass!");
409 abort();
410 }
411 MachineInstrBuilder MIB = BuildMI(get(Opc))
412 .addReg(SrcReg, false, false, isKill);
413 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
414 MachineOperand &MO = Addr[i];
415 if (MO.isRegister())
416 MIB.addReg(MO.getReg());
417 else if (MO.isImmediate())
418 MIB.addImm(MO.getImm());
419 else
420 MIB.addFrameIndex(MO.getIndex());
421 }
422 NewMIs.push_back(MIB);
423 return;
424 }
425
426 static void LoadRegFromStackSlot(const TargetInstrInfo &TII,
427 unsigned DestReg, int FrameIdx,
428 const TargetRegisterClass *RC,
429 SmallVectorImpl &NewMIs) {
430 if (RC == PPC::GPRCRegisterClass) {
431 if (DestReg != PPC::LR) {
432 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LWZ), DestReg),
433 FrameIdx));
434 } else {
435 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LWZ), PPC::R11),
436 FrameIdx));
437 NewMIs.push_back(BuildMI(TII.get(PPC::MTLR)).addReg(PPC::R11));
438 }
439 } else if (RC == PPC::G8RCRegisterClass) {
440 if (DestReg != PPC::LR8) {
441 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LD), DestReg),
442 FrameIdx));
443 } else {
444 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LD), PPC::R11),
445 FrameIdx));
446 NewMIs.push_back(BuildMI(TII.get(PPC::MTLR8)).addReg(PPC::R11));
447 }
448 } else if (RC == PPC::F8RCRegisterClass) {
449 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LFD), DestReg),
450 FrameIdx));
451 } else if (RC == PPC::F4RCRegisterClass) {
452 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LFS), DestReg),
453 FrameIdx));
454 } else if (RC == PPC::CRRCRegisterClass) {
455 // FIXME: We use R0 here, because it isn't available for RA.
456 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LWZ), PPC::R0),
457 FrameIdx));
458
459 // If the reloaded register isn't CR0, shift the bits right so that they are
460 // in the right CR's slot.
461 if (DestReg != PPC::CR0) {
462 unsigned ShiftBits = PPCRegisterInfo::getRegisterNumbering(DestReg)*4;
463 // rlwinm r11, r11, 32-ShiftBits, 0, 31.
464 NewMIs.push_back(BuildMI(TII.get(PPC::RLWINM), PPC::R0)
465 .addReg(PPC::R0).addImm(32-ShiftBits).addImm(0).addImm(31));
466 }
467
468 NewMIs.push_back(BuildMI(TII.get(PPC::MTCRF), DestReg).addReg(PPC::R0));
469 } else if (RC == PPC::VRRCRegisterClass) {
470 // We don't have indexed addressing for vector loads. Emit:
471 // R0 = ADDI FI#
472 // Dest = LVX 0, R0
473 //
474 // FIXME: We use R0 here, because it isn't available for RA.
475 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::ADDI), PPC::R0),
476 FrameIdx, 0, 0));
477 NewMIs.push_back(BuildMI(TII.get(PPC::LVX),DestReg).addReg(PPC::R0)
478 .addReg(PPC::R0));
479 } else {
480 assert(0 && "Unknown regclass!");
481 abort();
482 }
483 }
484
485 void
486 PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
487 MachineBasicBlock::iterator MI,
488 unsigned DestReg, int FrameIdx,
489 const TargetRegisterClass *RC) const {
490 SmallVector NewMIs;
491 LoadRegFromStackSlot(*this, DestReg, FrameIdx, RC, NewMIs);
492 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
493 MBB.insert(MI, NewMIs[i]);
494 }
495
496 void PPCInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
497 SmallVectorImpl &Addr,
498 const TargetRegisterClass *RC,
499 SmallVectorImpl &NewMIs) const{
500 if (Addr[0].isFrameIndex()) {
501 LoadRegFromStackSlot(*this, DestReg, Addr[0].getIndex(), RC, NewMIs);
502 return;
503 }
504
505 unsigned Opc = 0;
506 if (RC == PPC::GPRCRegisterClass) {
507 assert(DestReg != PPC::LR && "Can't handle this yet!");
508 Opc = PPC::LWZ;
509 } else if (RC == PPC::G8RCRegisterClass) {
510 assert(DestReg != PPC::LR8 && "Can't handle this yet!");
511 Opc = PPC::LD;
512 } else if (RC == PPC::F8RCRegisterClass) {
513 Opc = PPC::LFD;
514 } else if (RC == PPC::F4RCRegisterClass) {
515 Opc = PPC::LFS;
516 } else if (RC == PPC::VRRCRegisterClass) {
517 Opc = PPC::LVX;
518 } else {
519 assert(0 && "Unknown regclass!");
520 abort();
521 }
522 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
523 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
524 MachineOperand &MO = Addr[i];
525 if (MO.isRegister())
526 MIB.addReg(MO.getReg());
527 else if (MO.isImmediate())
528 MIB.addImm(MO.getImm());
529 else
530 MIB.addFrameIndex(MO.getIndex());
531 }
532 NewMIs.push_back(MIB);
533 return;
534 }
535
536
307537 bool PPCInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
308538 if (MBB.empty()) return false;
309539
102102 virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
103103 MachineBasicBlock *FBB,
104104 const std::vector &Cond) const;
105 void copyRegToReg(MachineBasicBlock &MBB,
106 MachineBasicBlock::iterator MI,
107 unsigned DestReg, unsigned SrcReg,
108 const TargetRegisterClass *DestRC,
109 const TargetRegisterClass *SrcRC) const;
105 virtual void copyRegToReg(MachineBasicBlock &MBB,
106 MachineBasicBlock::iterator MI,
107 unsigned DestReg, unsigned SrcReg,
108 const TargetRegisterClass *DestRC,
109 const TargetRegisterClass *SrcRC) const;
110
111 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
112 MachineBasicBlock::iterator MBBI,
113 unsigned SrcReg, bool isKill, int FrameIndex,
114 const TargetRegisterClass *RC) const;
115
116 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
117 SmallVectorImpl &Addr,
118 const TargetRegisterClass *RC,
119 SmallVectorImpl &NewMIs) const;
120
121 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
122 MachineBasicBlock::iterator MBBI,
123 unsigned DestReg, int FrameIndex,
124 const TargetRegisterClass *RC) const;
125
126 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
127 SmallVectorImpl &Addr,
128 const TargetRegisterClass *RC,
129 SmallVectorImpl &NewMIs) const;
130
110131 virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
111132 virtual bool ReverseBranchCondition(std::vector &Cond) const;
112133 };
103103 ImmToIdxMap[PPC::ADDI8] = PPC::ADD8; ImmToIdxMap[PPC::STD_32] = PPC::STDX_32;
104104 }
105105
106 static void StoreRegToStackSlot(const TargetInstrInfo &TII,
107 unsigned SrcReg, bool isKill, int FrameIdx,
108 const TargetRegisterClass *RC,
109 SmallVectorImpl &NewMIs) {
110 if (RC == PPC::GPRCRegisterClass) {
111 if (SrcReg != PPC::LR) {
112 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STW))
113 .addReg(SrcReg, false, false, isKill), FrameIdx));
114 } else {
115 // FIXME: this spills LR immediately to memory in one step. To do this,
116 // we use R11, which we know cannot be used in the prolog/epilog. This is
117 // a hack.
118 NewMIs.push_back(BuildMI(TII.get(PPC::MFLR), PPC::R11));
119 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STW))
120 .addReg(PPC::R11, false, false, isKill), FrameIdx));
121 }
122 } else if (RC == PPC::G8RCRegisterClass) {
123 if (SrcReg != PPC::LR8) {
124 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STD))
125 .addReg(SrcReg, false, false, isKill), FrameIdx));
126 } else {
127 // FIXME: this spills LR immediately to memory in one step. To do this,
128 // we use R11, which we know cannot be used in the prolog/epilog. This is
129 // a hack.
130 NewMIs.push_back(BuildMI(TII.get(PPC::MFLR8), PPC::X11));
131 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STD))
132 .addReg(PPC::X11, false, false, isKill), FrameIdx));
133 }
134 } else if (RC == PPC::F8RCRegisterClass) {
135 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STFD))
136 .addReg(SrcReg, false, false, isKill), FrameIdx));
137 } else if (RC == PPC::F4RCRegisterClass) {
138 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STFS))
139 .addReg(SrcReg, false, false, isKill), FrameIdx));
140 } else if (RC == PPC::CRRCRegisterClass) {
141 // FIXME: We use R0 here, because it isn't available for RA.
142 // We need to store the CR in the low 4-bits of the saved value. First,
143 // issue a MFCR to save all of the CRBits.
144 NewMIs.push_back(BuildMI(TII.get(PPC::MFCR), PPC::R0));
145
146 // If the saved register wasn't CR0, shift the bits left so that they are in
147 // CR0's slot.
148 if (SrcReg != PPC::CR0) {
149 unsigned ShiftBits = PPCRegisterInfo::getRegisterNumbering(SrcReg)*4;
150 // rlwinm r0, r0, ShiftBits, 0, 31.
151 NewMIs.push_back(BuildMI(TII.get(PPC::RLWINM), PPC::R0)
152 .addReg(PPC::R0).addImm(ShiftBits).addImm(0).addImm(31));
153 }
154
155 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::STW))
156 .addReg(PPC::R0, false, false, isKill), FrameIdx));
157 } else if (RC == PPC::VRRCRegisterClass) {
158 // We don't have indexed addressing for vector loads. Emit:
159 // R0 = ADDI FI#
160 // STVX VAL, 0, R0
161 //
162 // FIXME: We use R0 here, because it isn't available for RA.
163 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::ADDI), PPC::R0),
164 FrameIdx, 0, 0));
165 NewMIs.push_back(BuildMI(TII.get(PPC::STVX))
166 .addReg(SrcReg, false, false, isKill).addReg(PPC::R0).addReg(PPC::R0));
167 } else {
168 assert(0 && "Unknown regclass!");
169 abort();
170 }
171 }
172
173 void
174 PPCRegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
175 MachineBasicBlock::iterator MI,
176 unsigned SrcReg, bool isKill, int FrameIdx,
177 const TargetRegisterClass *RC) const {
178 SmallVector NewMIs;
179 StoreRegToStackSlot(TII, SrcReg, isKill, FrameIdx, RC, NewMIs);
180 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
181 MBB.insert(MI, NewMIs[i]);
182 }
183
184 void PPCRegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
185 bool isKill,
186 SmallVectorImpl &Addr,
187 const TargetRegisterClass *RC,
188 SmallVectorImpl &NewMIs) const {
189 if (Addr[0].isFrameIndex()) {
190 StoreRegToStackSlot(TII, SrcReg, isKill, Addr[0].getIndex(), RC, NewMIs);
191 return;
192 }
193
194 unsigned Opc = 0;
195 if (RC == PPC::GPRCRegisterClass) {
196 Opc = PPC::STW;
197 } else if (RC == PPC::G8RCRegisterClass) {
198 Opc = PPC::STD;
199 } else if (RC == PPC::F8RCRegisterClass) {
200 Opc = PPC::STFD;
201 } else if (RC == PPC::F4RCRegisterClass) {
202 Opc = PPC::STFS;
203 } else if (RC == PPC::VRRCRegisterClass) {
204 Opc = PPC::STVX;
205 } else {
206 assert(0 && "Unknown regclass!");
207 abort();
208 }
209 MachineInstrBuilder MIB = BuildMI(TII.get(Opc))
210 .addReg(SrcReg, false, false, isKill);
211 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
212 MachineOperand &MO = Addr[i];
213 if (MO.isRegister())
214 MIB.addReg(MO.getReg());
215 else if (MO.isImmediate())
216 MIB.addImm(MO.getImm());
217 else
218 MIB.addFrameIndex(MO.getIndex());
219 }
220 NewMIs.push_back(MIB);
221 return;
222 }
223
224 static void LoadRegFromStackSlot(const TargetInstrInfo &TII,
225 unsigned DestReg, int FrameIdx,
226 const TargetRegisterClass *RC,
227 SmallVectorImpl &NewMIs) {
228 if (RC == PPC::GPRCRegisterClass) {
229 if (DestReg != PPC::LR) {
230 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LWZ), DestReg),
231 FrameIdx));
232 } else {
233 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LWZ), PPC::R11),
234 FrameIdx));
235 NewMIs.push_back(BuildMI(TII.get(PPC::MTLR)).addReg(PPC::R11));
236 }
237 } else if (RC == PPC::G8RCRegisterClass) {
238 if (DestReg != PPC::LR8) {
239 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LD), DestReg),
240 FrameIdx));
241 } else {
242 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LD), PPC::R11),
243 FrameIdx));
244 NewMIs.push_back(BuildMI(TII.get(PPC::MTLR8)).addReg(PPC::R11));
245 }
246 } else if (RC == PPC::F8RCRegisterClass) {
247 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LFD), DestReg),
248 FrameIdx));
249 } else if (RC == PPC::F4RCRegisterClass) {
250 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LFS), DestReg),
251 FrameIdx));
252 } else if (RC == PPC::CRRCRegisterClass) {
253 // FIXME: We use R0 here, because it isn't available for RA.
254 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::LWZ), PPC::R0),
255 FrameIdx));
256
257 // If the reloaded register isn't CR0, shift the bits right so that they are
258 // in the right CR's slot.
259 if (DestReg != PPC::CR0) {
260 unsigned ShiftBits = PPCRegisterInfo::getRegisterNumbering(DestReg)*4;
261 // rlwinm r11, r11, 32-ShiftBits, 0, 31.
262 NewMIs.push_back(BuildMI(TII.get(PPC::RLWINM), PPC::R0)
263 .addReg(PPC::R0).addImm(32-ShiftBits).addImm(0).addImm(31));
264 }
265
266 NewMIs.push_back(BuildMI(TII.get(PPC::MTCRF), DestReg).addReg(PPC::R0));
267 } else if (RC == PPC::VRRCRegisterClass) {
268 // We don't have indexed addressing for vector loads. Emit:
269 // R0 = ADDI FI#
270 // Dest = LVX 0, R0
271 //
272 // FIXME: We use R0 here, because it isn't available for RA.
273 NewMIs.push_back(addFrameReference(BuildMI(TII.get(PPC::ADDI), PPC::R0),
274 FrameIdx, 0, 0));
275 NewMIs.push_back(BuildMI(TII.get(PPC::LVX),DestReg).addReg(PPC::R0)
276 .addReg(PPC::R0));
277 } else {
278 assert(0 && "Unknown regclass!");
279 abort();
280 }
281 }
282
283 void
284 PPCRegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
285 MachineBasicBlock::iterator MI,
286 unsigned DestReg, int FrameIdx,
287 const TargetRegisterClass *RC) const {
288 SmallVector NewMIs;
289 LoadRegFromStackSlot(TII, DestReg, FrameIdx, RC, NewMIs);
290 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
291 MBB.insert(MI, NewMIs[i]);
292 }
293
294 void PPCRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
295 SmallVectorImpl &Addr,
296 const TargetRegisterClass *RC,
297 SmallVectorImpl &NewMIs) const{
298 if (Addr[0].isFrameIndex()) {
299 LoadRegFromStackSlot(TII, DestReg, Addr[0].getIndex(), RC, NewMIs);
300 return;
301 }
302
303 unsigned Opc = 0;
304 if (RC == PPC::GPRCRegisterClass) {
305 assert(DestReg != PPC::LR && "Can't handle this yet!");
306 Opc = PPC::LWZ;
307 } else if (RC == PPC::G8RCRegisterClass) {
308 assert(DestReg != PPC::LR8 && "Can't handle this yet!");
309 Opc = PPC::LD;
310 } else if (RC == PPC::F8RCRegisterClass) {
311 Opc = PPC::LFD;
312 } else if (RC == PPC::F4RCRegisterClass) {
313 Opc = PPC::LFS;
314 } else if (RC == PPC::VRRCRegisterClass) {
315 Opc = PPC::LVX;
316 } else {
317 assert(0 && "Unknown regclass!");
318 abort();
319 }
320 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
321 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
322 MachineOperand &MO = Addr[i];
323 if (MO.isRegister())
324 MIB.addReg(MO.getReg());
325 else if (MO.isImmediate())
326 MIB.addImm(MO.getImm());
327 else
328 MIB.addFrameIndex(MO.getIndex());
329 }
330 NewMIs.push_back(MIB);
331 return;
332 }
333
334106 void PPCRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
335107 MachineBasicBlock::iterator I,
336108 unsigned DestReg,
3434 static unsigned getRegisterNumbering(unsigned RegEnum);
3535
3636 /// Code Generation virtual methods...
37 void storeRegToStackSlot(MachineBasicBlock &MBB,
38 MachineBasicBlock::iterator MBBI,
39 unsigned SrcReg, bool isKill, int FrameIndex,
40 const TargetRegisterClass *RC) const;
41
42 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
43 SmallVectorImpl &Addr,
44 const TargetRegisterClass *RC,
45 SmallVectorImpl &NewMIs) const;
46
47 void loadRegFromStackSlot(MachineBasicBlock &MBB,
48 MachineBasicBlock::iterator MBBI,
49 unsigned DestReg, int FrameIndex,
50 const TargetRegisterClass *RC) const;
51
52 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
53 SmallVectorImpl &Addr,
54 const TargetRegisterClass *RC,
55 SmallVectorImpl &NewMIs) const;
56
57 void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
58 unsigned DestReg, unsigned SrcReg,
59 const TargetRegisterClass *DestRC,
60 const TargetRegisterClass *SrcRC) const;
61
6237 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
6338 unsigned DestReg, const MachineInstr *Orig) const;
6439
128128 else
129129 assert (0 && "Can't copy this register");
130130 }
131
132 void SparcInstrInfo::
133 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
134 unsigned SrcReg, bool isKill, int FI,
135 const TargetRegisterClass *RC) const {
136 // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
137 if (RC == SP::IntRegsRegisterClass)
138 BuildMI(MBB, I, get(SP::STri)).addFrameIndex(FI).addImm(0)
139 .addReg(SrcReg, false, false, isKill);
140 else if (RC == SP::FPRegsRegisterClass)
141 BuildMI(MBB, I, get(SP::STFri)).addFrameIndex(FI).addImm(0)
142 .addReg(SrcReg, false, false, isKill);
143 else if (RC == SP::DFPRegsRegisterClass)
144 BuildMI(MBB, I, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
145 .addReg(SrcReg, false, false, isKill);
146 else
147 assert(0 && "Can't store this register to stack slot");
148 }
149
150 void SparcInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
151 bool isKill,
152 SmallVectorImpl &Addr,
153 const TargetRegisterClass *RC,
154 SmallVectorImpl &NewMIs) const {
155 unsigned Opc = 0;
156 if (RC == SP::IntRegsRegisterClass)
157 Opc = SP::STri;
158 else if (RC == SP::FPRegsRegisterClass)
159 Opc = SP::STFri;
160 else if (RC == SP::DFPRegsRegisterClass)
161 Opc = SP::STDFri;
162 else
163 assert(0 && "Can't load this register");
164 MachineInstrBuilder MIB = BuildMI(get(Opc));
165 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
166 MachineOperand &MO = Addr[i];
167 if (MO.isRegister())
168 MIB.addReg(MO.getReg());
169 else if (MO.isImmediate())
170 MIB.addImm(MO.getImm());
171 else {
172 assert(MO.isFI());
173 MIB.addFrameIndex(MO.getIndex());
174 }
175 }
176 MIB.addReg(SrcReg, false, false, isKill);
177 NewMIs.push_back(MIB);
178 return;
179 }
180
181 void SparcInstrInfo::
182 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
183 unsigned DestReg, int FI,
184 const TargetRegisterClass *RC) const {
185 if (RC == SP::IntRegsRegisterClass)
186 BuildMI(MBB, I, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0);
187 else if (RC == SP::FPRegsRegisterClass)
188 BuildMI(MBB, I, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0);
189 else if (RC == SP::DFPRegsRegisterClass)
190 BuildMI(MBB, I, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0);
191 else
192 assert(0 && "Can't load this register from stack slot");
193 }
194
195 void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
196 SmallVectorImpl &Addr,
197 const TargetRegisterClass *RC,
198 SmallVectorImpl &NewMIs) const {
199 unsigned Opc = 0;
200 if (RC == SP::IntRegsRegisterClass)
201 Opc = SP::LDri;
202 else if (RC == SP::FPRegsRegisterClass)
203 Opc = SP::LDFri;
204 else if (RC == SP::DFPRegsRegisterClass)
205 Opc = SP::LDDFri;
206 else
207 assert(0 && "Can't load this register");
208 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
209 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
210 MachineOperand &MO = Addr[i];
211 if (MO.isReg())
212 MIB.addReg(MO.getReg());
213 else if (MO.isImm())
214 MIB.addImm(MO.getImm());
215 else {
216 assert(MO.isFI());
217 MIB.addFrameIndex(MO.getIndex());
218 }
219 }
220 NewMIs.push_back(MIB);
221 return;
222 }
7272 unsigned DestReg, unsigned SrcReg,
7373 const TargetRegisterClass *DestRC,
7474 const TargetRegisterClass *SrcRC) const;
75
76 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
77 MachineBasicBlock::iterator MBBI,
78 unsigned SrcReg, bool isKill, int FrameIndex,
79 const TargetRegisterClass *RC) const;
80
81 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
82 SmallVectorImpl &Addr,
83 const TargetRegisterClass *RC,
84 SmallVectorImpl &NewMIs) const;
85
86 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
87 MachineBasicBlock::iterator MBBI,
88 unsigned DestReg, int FrameIndex,
89 const TargetRegisterClass *RC) const;
90
91 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
92 SmallVectorImpl &Addr,
93 const TargetRegisterClass *RC,
94 SmallVectorImpl &NewMIs) const;
7595 };
7696
7797 }
2727 const TargetInstrInfo &tii)
2828 : SparcGenRegisterInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP),
2929 Subtarget(st), TII(tii) {
30 }
31
32 void SparcRegisterInfo::
33 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
34 unsigned SrcReg, bool isKill, int FI,
35 const TargetRegisterClass *RC) const {
36 // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
37 if (RC == SP::IntRegsRegisterClass)
38 BuildMI(MBB, I, TII.get(SP::STri)).addFrameIndex(FI).addImm(0)
39 .addReg(SrcReg, false, false, isKill);
40 else if (RC == SP::FPRegsRegisterClass)
41 BuildMI(MBB, I, TII.get(SP::STFri)).addFrameIndex(FI).addImm(0)
42 .addReg(SrcReg, false, false, isKill);
43 else if (RC == SP::DFPRegsRegisterClass)
44 BuildMI(MBB, I, TII.get(SP::STDFri)).addFrameIndex(FI).addImm(0)
45 .addReg(SrcReg, false, false, isKill);
46 else
47 assert(0 && "Can't store this register to stack slot");
48 }
49
50 void SparcRegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
51 bool isKill,
52 SmallVectorImpl &Addr,
53 const TargetRegisterClass *RC,
54 SmallVectorImpl &NewMIs) const {
55 unsigned Opc = 0;
56 if (RC == SP::IntRegsRegisterClass)
57 Opc = SP::STri;
58 else if (RC == SP::FPRegsRegisterClass)
59 Opc = SP::STFri;
60 else if (RC == SP::DFPRegsRegisterClass)
61 Opc = SP::STDFri;
62 else
63 assert(0 && "Can't load this register");
64 MachineInstrBuilder MIB = BuildMI(TII.get(Opc));
65 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
66 MachineOperand &MO = Addr[i];
67 if (MO.isRegister())
68 MIB.addReg(MO.getReg());
69 else if (MO.isImmediate())
70 MIB.addImm(MO.getImm());
71 else {
72 assert(MO.isFI());
73 MIB.addFrameIndex(MO.getIndex());
74 }
75 }
76 MIB.addReg(SrcReg, false, false, isKill);
77 NewMIs.push_back(MIB);
78 return;
79 }
80
81 void SparcRegisterInfo::
82 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
83 unsigned DestReg, int FI,
84 const TargetRegisterClass *RC) const {
85 if (RC == SP::IntRegsRegisterClass)
86 BuildMI(MBB, I, TII.get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0);
87 else if (RC == SP::FPRegsRegisterClass)
88 BuildMI(MBB, I, TII.get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0);
89 else if (RC == SP::DFPRegsRegisterClass)
90 BuildMI(MBB, I, TII.get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0);
91 else
92 assert(0 && "Can't load this register from stack slot");
93 }
94
95 void SparcRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
96 SmallVectorImpl &Addr,
97 const TargetRegisterClass *RC,
98 SmallVectorImpl &NewMIs) const {
99 unsigned Opc = 0;
100 if (RC == SP::IntRegsRegisterClass)
101 Opc = SP::LDri;
102 else if (RC == SP::FPRegsRegisterClass)
103 Opc = SP::LDFri;
104 else if (RC == SP::DFPRegsRegisterClass)
105 Opc = SP::LDDFri;
106 else
107 assert(0 && "Can't load this register");
108 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
109 for (unsigned i = 0, e = Addr.size(); i != e; ++i) {
110 MachineOperand &MO = Addr[i];
111 if (MO.isReg())
112 MIB.addReg(MO.getReg());
113 else if (MO.isImm())
114 MIB.addImm(MO.getImm());
115 else {
116 assert(MO.isFI());
117 MIB.addFrameIndex(MO.getIndex());
118 }
119 }
120 NewMIs.push_back(MIB);
121 return;
12230 }
12331
12432 void SparcRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
2828
2929 SparcRegisterInfo(SparcSubtarget &st, const TargetInstrInfo &tii);
3030
31 /// Code Generation virtual methods...
32 void storeRegToStackSlot(MachineBasicBlock &MBB,
33 MachineBasicBlock::iterator MBBI,
34 unsigned SrcReg, bool isKill, int FrameIndex,
35 const TargetRegisterClass *RC) const;
36
37 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
38 SmallVectorImpl &Addr,
39 const TargetRegisterClass *RC,
40 SmallVectorImpl &NewMIs) const;
41
42 void loadRegFromStackSlot(MachineBasicBlock &MBB,
43 MachineBasicBlock::iterator MBBI,
44 unsigned DestReg, int FrameIndex,
45 const TargetRegisterClass *RC) const;
46
47 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
48 SmallVectorImpl &Addr,
49 const TargetRegisterClass *RC,
50 SmallVectorImpl &NewMIs) const;
51
52 void copyRegToReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
53 unsigned DestReg, unsigned SrcReg,
54 const TargetRegisterClass *DestRC,
55 const TargetRegisterClass *SrcRC) const;
56
31 /// Code Generation virtual methods...
5732 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
5833 unsigned DestReg, const MachineInstr *Orig) const;
5934
755755 return 2;
756756 }
757757
758 static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
759 MachineOperand &MO) {
760 if (MO.isRegister())
761 MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
762 false, false, MO.getSubReg());
763 else if (MO.isImmediate())
764 MIB = MIB.addImm(MO.getImm());
765 else if (MO.isFrameIndex())
766 MIB = MIB.addFrameIndex(MO.getIndex());
767 else if (MO.isGlobalAddress())
768 MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
769 else if (MO.isConstantPoolIndex())
770 MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
771 else if (MO.isJumpTableIndex())
772 MIB = MIB.addJumpTableIndex(MO.getIndex());
773 else if (MO.isExternalSymbol())
774 MIB = MIB.addExternalSymbol(MO.getSymbolName());
775 else
776 assert(0 && "Unknown operand for X86InstrAddOperand!");
777
778 return MIB;
779 }
780
758781 unsigned
759782 X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
760783 MachineBasicBlock *FBB,
851874 BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
852875 }
853876
877 static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
878 unsigned StackAlign) {
879 unsigned Opc = 0;
880 if (RC == &X86::GR64RegClass) {
881 Opc = X86::MOV64mr;
882 } else if (RC == &X86::GR32RegClass) {
883 Opc = X86::MOV32mr;
884 } else if (RC == &X86::GR16RegClass) {
885 Opc = X86::MOV16mr;
886 } else if (RC == &X86::GR8RegClass) {
887 Opc = X86::MOV8mr;
888 } else if (RC == &X86::GR32_RegClass) {
889 Opc = X86::MOV32_mr;
890 } else if (RC == &X86::GR16_RegClass) {
891 Opc = X86::MOV16_mr;
892 } else if (RC == &X86::RFP80RegClass) {
893 Opc = X86::ST_FpP80m; // pops
894 } else if (RC == &X86::RFP64RegClass) {
895 Opc = X86::ST_Fp64m;
896 } else if (RC == &X86::RFP32RegClass) {
897 Opc = X86::ST_Fp32m;
898 } else if (RC == &X86::FR32RegClass) {
899 Opc = X86::MOVSSmr;
900 } else if (RC == &X86::FR64RegClass) {
901 Opc = X86::MOVSDmr;
902 } else if (RC == &X86::VR128RegClass) {
903 // FIXME: Use movaps once we are capable of selectively
904 // aligning functions that spill SSE registers on 16-byte boundaries.
905 Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
906 } else if (RC == &X86::VR64RegClass) {
907 Opc = X86::MMX_MOVQ64mr;
908 } else {
909 assert(0 && "Unknown regclass");
910 abort();
911 }
912
913 return Opc;
914 }
915
916 void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
917 MachineBasicBlock::iterator MI,
918 unsigned SrcReg, bool isKill, int FrameIdx,
919 const TargetRegisterClass *RC) const {
920 unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
921 addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
922 .addReg(SrcReg, false, false, isKill);
923 }
924
925 void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
926 bool isKill,
927 SmallVectorImpl &Addr,
928 const TargetRegisterClass *RC,
929 SmallVectorImpl &NewMIs) const {
930 unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
931 MachineInstrBuilder MIB = BuildMI(get(Opc));
932 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
933 MIB = X86InstrAddOperand(MIB, Addr[i]);
934 MIB.addReg(SrcReg, false, false, isKill);
935 NewMIs.push_back(MIB);
936 }
937
938 static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
939 unsigned StackAlign) {
940 unsigned Opc = 0;
941 if (RC == &X86::GR64RegClass) {
942 Opc = X86::MOV64rm;
943 } else if (RC == &X86::GR32RegClass) {
944 Opc = X86::MOV32rm;
945 } else if (RC == &X86::GR16RegClass) {
946 Opc = X86::MOV16rm;
947 } else if (RC == &X86::GR8RegClass) {
948 Opc = X86::MOV8rm;
949 } else if (RC == &X86::GR32_RegClass) {
950 Opc = X86::MOV32_rm;
951 } else if (RC == &X86::GR16_RegClass) {
952 Opc = X86::MOV16_rm;
953 } else if (RC == &X86::RFP80RegClass) {
954 Opc = X86::LD_Fp80m;
955 } else if (RC == &X86::RFP64RegClass) {
956 Opc = X86::LD_Fp64m;
957 } else if (RC == &X86::RFP32RegClass) {
958 Opc = X86::LD_Fp32m;
959 } else if (RC == &X86::FR32RegClass) {
960 Opc = X86::MOVSSrm;
961 } else if (RC == &X86::FR64RegClass) {
962 Opc = X86::MOVSDrm;
963 } else if (RC == &X86::VR128RegClass) {
964 // FIXME: Use movaps once we are capable of selectively
965 // aligning functions that spill SSE registers on 16-byte boundaries.
966 Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
967 } else if (RC == &X86::VR64RegClass) {
968 Opc = X86::MMX_MOVQ64rm;
969 } else {
970 assert(0 && "Unknown regclass");
971 abort();
972 }
973
974 return Opc;
975 }
976
977 void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
978 MachineBasicBlock::iterator MI,
979 unsigned DestReg, int FrameIdx,
980 const TargetRegisterClass *RC) const{
981 unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
982 addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
983 }
984
985 void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
986 SmallVectorImpl &Addr,
987 const TargetRegisterClass *RC,
988 SmallVectorImpl &NewMIs) const {
989 unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
990 MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
991 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
992 MIB = X86InstrAddOperand(MIB, Addr[i]);
993 NewMIs.push_back(MIB);
994 }
995
854996 bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
855997 if (MBB.empty()) return false;
856998
283283 unsigned DestReg, unsigned SrcReg,
284284 const TargetRegisterClass *DestRC,
285285 const TargetRegisterClass *SrcRC) const;
// Spill/reload hooks newly declared on X86InstrInfo (moved here from
// X86RegisterInfo by this commit). storeRegToStackSlot/loadRegFromStackSlot
// insert at a position in a block; storeRegToAddr/loadRegFromAddr build
// detached instructions into NewMIs from an explicit address operand list.
// NOTE(review): SmallVectorImpl element types were stripped by the HTML
// scrape; the declarations are not valid C++ as rendered here.
286 virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
287 MachineBasicBlock::iterator MI,
288 unsigned SrcReg, bool isKill, int FrameIndex,
289 const TargetRegisterClass *RC) const;
290 
291 virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
292 SmallVectorImpl &Addr,
293 const TargetRegisterClass *RC,
294 SmallVectorImpl &NewMIs) const;
295 
296 virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
297 MachineBasicBlock::iterator MI,
298 unsigned DestReg, int FrameIndex,
299 const TargetRegisterClass *RC) const;
300 
301 virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
302 SmallVectorImpl &Addr,
303 const TargetRegisterClass *RC,
304 SmallVectorImpl &NewMIs) const;
286305 virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
287306 virtual bool ReverseBranchCondition(std::vector &Cond) const;
288307
835835 return Opc;
836836 }
837837
// storeRegToStackSlot (removed side of the diff) - old X86RegisterInfo
// implementation being deleted by this commit: spill SrcReg to FrameIdx,
// inserting before MI. The addReg flags (false, false, isKill) presumably
// mean (isDef, isImp, isKill) — confirm against this MachineInstrBuilder
// revision.
838 void X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
839 MachineBasicBlock::iterator MI,
840 unsigned SrcReg, bool isKill, int FrameIdx,
841 const TargetRegisterClass *RC) const {
842 unsigned Opc = getStoreRegOpcode(RC, StackAlign);
843 addFrameReference(BuildMI(MBB, MI, TII.get(Opc)), FrameIdx)
844 .addReg(SrcReg, false, false, isKill);
845 }
846
847 void X86RegisterInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
848 bool isKill,
849 SmallVectorImpl &Addr,
850 const TargetRegisterClass *RC,
// getCrossCopyRegClass - Condition-code registers (CCR) cannot be copied
// directly; route such copies through a general-purpose class: GR64 in
// 64-bit mode, GR32 otherwise. NULL means the class can be copied directly.
838 const TargetRegisterClass *
839 X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
840 if (RC == &X86::CCRRegClass)
841 if (Is64Bit)
842 return &X86::GR64RegClass;
843 else
844 return &X86::GR32RegClass;
845 return NULL;
846 }
847
// reMaterialize - Re-create Orig's value in DestReg before iterator I.
// The MOVxr0 pseudo-ops are lowered as xor, which clobbers EFLAGS, so they
// are rematerialized as explicit mov-immediate-0 instead; anything else is
// cloned verbatim with only its def operand retargeted to DestReg.
848 void X86RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
849 MachineBasicBlock::iterator I,
850 unsigned DestReg,
851 const MachineInstr *Orig) const {
852 // MOV32r0 etc. are implemented with xor which clobbers condition code.
853 // Re-materialize them as movri instructions to avoid side effects.
854 switch (Orig->getOpcode()) {
855 case X86::MOV8r0:
856 BuildMI(MBB, I, TII.get(X86::MOV8ri), DestReg).addImm(0);
857 break;
858 case X86::MOV16r0:
859 BuildMI(MBB, I, TII.get(X86::MOV16ri), DestReg).addImm(0);
860 break;
861 case X86::MOV32r0:
862 BuildMI(MBB, I, TII.get(X86::MOV32ri), DestReg).addImm(0);
863 break;
864 case X86::MOV64r0:
// MOV64ri32: 64-bit dest, sign-extended 32-bit immediate — 0 fits either way.
865 BuildMI(MBB, I, TII.get(X86::MOV64ri32), DestReg).addImm(0);
866 break;
867 default: {
868 MachineInstr *MI = Orig->clone();
// Operand 0 is assumed to be the sole def being rematerialized.
869 MI->getOperand(0).setReg(DestReg);
870 MBB.insert(I, MI);
871 break;
872 }
873 }
874 }
875
// FuseTwoAddrInst - Build the memory form of a two-address instruction:
// the address operands MOs replace *both* tied register operands, so the
// new instruction starts with the address and then takes MI's operands
// from index 2 onward. A bare FrameIndex (< 4 address ops) is padded out
// to the full x86 [scale, index, disp] addressing tuple.
// NOTE(review): scrape stripped the SmallVector element type from MOs.
876 static MachineInstr *FuseTwoAddrInst(unsigned Opcode,
877 SmallVector &MOs,
878 MachineInstr *MI, const TargetInstrInfo &TII) {
879 // Create the base instruction with the memory operand as the first part.
880 MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
881 MachineInstrBuilder MIB(NewMI);
882 unsigned NumAddrOps = MOs.size();
883 for (unsigned i = 0; i != NumAddrOps; ++i)
884 MIB = X86InstrAddOperand(MIB, MOs[i]);
885 if (NumAddrOps < 4) // FrameIndex only
886 MIB.addImm(1).addReg(0).addImm(0);
887 
888 // Loop over the rest of the ri operands, converting them over.
// NumOps counts MI's declared operands minus the two fused (tied) registers.
889 unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
890 for (unsigned i = 0; i != NumOps; ++i) {
891 MachineOperand &MO = MI->getOperand(i+2);
892 MIB = X86InstrAddOperand(MIB, MO);
893 }
// Trailing operands past the declared count (e.g. implicit uses/defs).
894 for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
895 MachineOperand &MO = MI->getOperand(i);
896 MIB = X86InstrAddOperand(MIB, MO);
897 }
898 return MIB;
899 }
900
// FuseInst - Build the memory form of MI where only operand OpNo is folded:
// every operand is copied across except OpNo, which must be a register and
// is replaced by the address operand list MOs (padded to the full x86
// addressing tuple when only a FrameIndex was supplied).
901 static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
902 SmallVector &MOs,
903 MachineInstr *MI, const TargetInstrInfo &TII) {
904 MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
905 MachineInstrBuilder MIB(NewMI);
906 
907 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
908 MachineOperand &MO = MI->getOperand(i);
909 if (i == OpNo) {
910 assert(MO.isRegister() && "Expected to fold into reg operand!");
911 unsigned NumAddrOps = MOs.size();
// Inner 'i' intentionally shadows the outer loop index for the address copy.
912 for (unsigned i = 0; i != NumAddrOps; ++i)
913 MIB = X86InstrAddOperand(MIB, MOs[i]);
914 if (NumAddrOps < 4) // FrameIndex only
915 MIB.addImm(1).addReg(0).addImm(0);
916 } else {
917 MIB = X86InstrAddOperand(MIB, MO);
918 }
919 }
920 return MIB;
921 }
922
// MakeM0Inst - Build a "store immediate 0 to memory" instruction (used to
// fold MOVxr0 defs straight into a memory store): address operands first,
// padded to the full addressing tuple if only a FrameIndex was given, then
// the immediate 0. MI itself is unused here beyond matching the caller's
// fusing signature.
923 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
924 SmallVector &MOs,
925 MachineInstr *MI) {
926 MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
927 
928 unsigned NumAddrOps = MOs.size();
929 for (unsigned i = 0; i != NumAddrOps; ++i)
930 MIB = X86InstrAddOperand(MIB, MOs[i]);
931 if (NumAddrOps < 4) // FrameIndex only
932 MIB.addImm(1).addReg(0).addImm(0);
933 return MIB.addImm(0);
934 }
935
// foldMemoryOperand (worker) - Try to fold the memory reference MOs into
// operand i of MI. Strategy: (1) a true two-address fold (operand 0 and 1
// are the same register) uses the 2Addr opcode table and replaces both tied
// registers; (2) folding into operand 0 of a MOVxr0 becomes a direct
// store-of-zero; otherwise the per-operand-index opcode table maps the
// register form to its memory form. Returns the new (unfused operands
// preserved) instruction, or NULL and optionally a diagnostic on failure.
// NOTE(review): DenseMap key/value types were stripped by the scrape; the
// (unsigned*) cast of an opcode for the lookup mirrors the table's key type
// in this revision.
936 MachineInstr*
937 X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
938 SmallVector &MOs) const {
939 const DenseMap *OpcodeTablePtr = NULL;
940 bool isTwoAddrFold = false;
941 unsigned NumOps = TII.getNumOperands(MI->getOpcode());
942 bool isTwoAddr = NumOps > 1 &&
943 MI->getInstrDescriptor()->getOperandConstraint(1, TOI::TIED_TO) != -1;
944 
945 MachineInstr *NewMI = NULL;
946 // Folding a memory location into the two-address part of a two-address
947 // instruction is different than folding it other places. It requires
948 // replacing the *two* registers with the memory location.
949 if (isTwoAddr && NumOps >= 2 && i < 2 &&
950 MI->getOperand(0).isRegister() &&
951 MI->getOperand(1).isRegister() &&
952 MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
953 OpcodeTablePtr = &RegOp2MemOpTable2Addr;
954 isTwoAddrFold = true;
955 } else if (i == 0) { // If operand 0
// Folding the def of a zero-materializing pseudo == storing 0 to memory.
956 if (MI->getOpcode() == X86::MOV16r0)
957 NewMI = MakeM0Inst(TII, X86::MOV16mi, MOs, MI);
958 else if (MI->getOpcode() == X86::MOV32r0)
959 NewMI = MakeM0Inst(TII, X86::MOV32mi, MOs, MI);
960 else if (MI->getOpcode() == X86::MOV64r0)
961 NewMI = MakeM0Inst(TII, X86::MOV64mi32, MOs, MI);
962 else if (MI->getOpcode() == X86::MOV8r0)
963 NewMI = MakeM0Inst(TII, X86::MOV8mi, MOs, MI);
964 if (NewMI) {
965 NewMI->copyKillDeadInfo(MI);
966 return NewMI;
967 }
968 
969 OpcodeTablePtr = &RegOp2MemOpTable0;
970 } else if (i == 1) {
971 OpcodeTablePtr = &RegOp2MemOpTable1;
972 } else if (i == 2) {
973 OpcodeTablePtr = &RegOp2MemOpTable2;
974 }
975 
976 // If table selected...
977 if (OpcodeTablePtr) {
978 // Find the Opcode to fuse
979 DenseMap::iterator I =
980 OpcodeTablePtr->find((unsigned*)MI->getOpcode());
981 if (I != OpcodeTablePtr->end()) {
982 if (isTwoAddrFold)
983 NewMI = FuseTwoAddrInst(I->second, MOs, MI, TII);
984 else
985 NewMI = FuseInst(I->second, i, MOs, MI, TII);
986 NewMI->copyKillDeadInfo(MI);
987 return NewMI;
988 }
989 }
990 
991 // No fusion
992 if (PrintFailedFusing)
993 cerr << "We failed to fuse ("
994 << ((i == 1) ? "r" : "s") << "): " << *MI;
995 return NULL;
996 }
997
998
// foldMemoryOperand (FrameIndex form) - Fold a stack-slot reference into MI.
// Special case: TESTrr reg,reg with both operands requested (Ops == {0,1})
// is first rewritten in place to CMPri reg,0, reducing the fold to a single
// operand; otherwise exactly one operand index is accepted. Delegates to
// the worker above with a single FrameIndex address operand.
// NOTE(review): the in-place TEST->CMP mutation persists even if the
// subsequent fold fails — presumably benign since both are semantically
// equal here, but worth confirming upstream.
999 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1000 SmallVectorImpl &Ops,
1001 int FrameIndex) const {
1002 // Check switch flag
1003 if (NoFusing) return NULL;
1004 
1005 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1006 unsigned NewOpc = 0;
1007 switch (MI->getOpcode()) {
1008 default: return NULL;
1009 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1010 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1011 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1012 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1013 }
1014 // Change to CMPXXri r, 0 first.
1015 MI->setInstrDescriptor(TII.get(NewOpc));
1016 MI->getOperand(1).ChangeToImmediate(0);
1017 } else if (Ops.size() != 1)
1018 return NULL;
1019 
1020 SmallVector MOs;
1021 MOs.push_back(MachineOperand::CreateFI(FrameIndex));
1022 return foldMemoryOperand(MI, Ops[0], MOs);
1023 }
1024
// foldMemoryOperand (LoadMI form) - Fold the memory address of an existing
// load instruction into MI, instead of a stack slot. Same TESTrr -> CMPri 0
// preprocessing as the FrameIndex form; then the last four operands of
// LoadMI (its addressing tuple, by x86 convention in this revision) become
// the fold address.
1025 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1026 SmallVectorImpl &Ops,
1027 MachineInstr *LoadMI) const {
1028 // Check switch flag
1029 if (NoFusing) return NULL;
1030 
1031 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1032 unsigned NewOpc = 0;
1033 switch (MI->getOpcode()) {
1034 default: return NULL;
1035 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1036 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1037 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1038 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1039 }
1040 // Change to CMPXXri r, 0 first.
1041 MI->setInstrDescriptor(TII.get(NewOpc));
1042 MI->getOperand(1).ChangeToImmediate(0);
1043 } else if (Ops.size() != 1)
1044 return NULL;
1045 
1046 SmallVector MOs;
// Assumes LoadMI's address is its trailing 4 operands — TODO confirm this
// holds for every load opcode reaching here.
1047 unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
1048 for (unsigned i = NumOps - 4; i != NumOps; ++i)
1049 MOs.push_back(LoadMI->getOperand(i));
1050 return foldMemoryOperand(MI, Ops[0], MOs);
1051 }
1052
1053
// canFoldMemoryOperand - Predicate mirroring foldMemoryOperand's dispatch
// without building anything: answers whether a fold of the given operand
// indices would succeed (table lookup only). Kept structurally parallel to
// the worker above — the table selection logic must stay in sync with it.
1054 bool X86RegisterInfo::canFoldMemoryOperand(MachineInstr *MI,
1055 SmallVectorImpl &Ops) const {
1056 // Check switch flag
// NOTE(review): returns 0 where the rest of the function returns
// true/false — same value, inconsistent style.
1057 if (NoFusing) return 0;
1058 
1059 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1060 switch (MI->getOpcode()) {
1061 default: return false;
1062 case X86::TEST8rr:
1063 case X86::TEST16rr:
1064 case X86::TEST32rr:
1065 case X86::TEST64rr:
1066 return true;
1067 }
1068 }
1069 
1070 if (Ops.size() != 1)
1071 return false;
1072 
1073 unsigned OpNum = Ops[0];
1074 unsigned Opc = MI->getOpcode();
1075 unsigned NumOps = TII.getNumOperands(Opc);
1076 bool isTwoAddr = NumOps > 1 &&
1077 TII.getOperandConstraint(Opc, 1, TOI::TIED_TO) != -1;
1078 
1079 // Folding a memory location into the two-address part of a two-address
1080 // instruction is different than folding it other places. It requires
1081 // replacing the *two* registers with the memory location.
1082 const DenseMap *OpcodeTablePtr = NULL;
1083 if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
1084 OpcodeTablePtr = &RegOp2MemOpTable2Addr;
1085 } else if (OpNum == 0) { // If operand 0
1086 switch (Opc) {
// The MOVxr0 store-of-zero folds always succeed (see foldMemoryOperand).
1087 case X86::MOV16r0:
1088 case X86::MOV32r0:
1089 case X86::MOV64r0:
1090 case X86::MOV8r0:
1091 return true;
1092 default: break;
1093 }
1094 OpcodeTablePtr = &RegOp2MemOpTable0;
1095 } else if (OpNum == 1) {
1096 OpcodeTablePtr = &RegOp2MemOpTable1;
1097 } else if (OpNum == 2) {
1098 OpcodeTablePtr = &RegOp2MemOpTable2;
1099 }
1100 
1101 if (OpcodeTablePtr) {
1102 // Find the Opcode to fuse
1103 DenseMap::iterator I =
1104 OpcodeTablePtr->find((unsigned*)Opc);
1105 if (I != OpcodeTablePtr->end())
1106 return true;
1107 }
1108 return false;
1109 }
1110
// unfoldMemoryOperand - Reverse a memory fold: split MI (a memory-form
// instruction) back into up to three instructions appended to NewMIs —
// an optional reload of Reg (UnfoldLoad), the register-form data
// instruction, and an optional spill of Reg (UnfoldStore). The
// MemOp2RegOpTable maps memory opcode -> (register opcode, packed flags:
// low nibble = address operand index, bit 4 = folded load, bit 5 = folded
// store).
// NOTE(review): the diff rendering interleaves six removed lines of the old
// storeRegToAddr body (marked 852-857 below) into this function's opening;
// they are diff residue, not part of unfoldMemoryOperand.
1111 bool X86RegisterInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
1112 unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
8511113 SmallVectorImpl &NewMIs) const {
852 unsigned Opc = getStoreRegOpcode(RC, StackAlign);
853 MachineInstrBuilder MIB = BuildMI(TII.get(Opc));
854 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
855 MIB = X86InstrAddOperand(MIB, Addr[i]);
856 MIB.addReg(SrcReg, false, false, isKill);
857 NewMIs.push_back(MIB);
1114 DenseMap >::iterator I =
1115 MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
1116 if (I == MemOp2RegOpTable.end())
1117 return false;
1118 unsigned Opc = I->second.first;
// Unpack the table's flag word: address index + which sides were folded.
1119 unsigned Index = I->second.second & 0xf;
1120 bool FoldedLoad = I->second.second & (1 << 4);
1121 bool FoldedStore = I->second.second & (1 << 5);
// Refuse to unfold a side that was never folded in.
1122 if (UnfoldLoad && !FoldedLoad)
1123 return false;
1124 UnfoldLoad &= FoldedLoad;
1125 if (UnfoldStore && !FoldedStore)
1126 return false;
1127 UnfoldStore &= FoldedStore;
1128 
1129 const TargetInstrDescriptor &TID = TII.get(Opc);
1130 const TargetOperandInfo &TOI = TID.OpInfo[Index];
1131 const TargetRegisterClass *RC = (TOI.Flags & M_LOOK_UP_PTR_REG_CLASS)
1132 ? TII.getPointerRegClass() : getRegClass(TOI.RegClass);
// Partition MI's operands around the 4-operand address at Index.
1133 SmallVector AddrOps;
1134 SmallVector BeforeOps;
1135 SmallVector AfterOps;
1136 SmallVector ImpOps;
1137 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1138 MachineOperand &Op = MI->getOperand(i);
1139 if (i >= Index && i < Index+4)
1140 AddrOps.push_back(Op);
1141 else if (Op.isRegister() && Op.isImplicit())
1142 ImpOps.push_back(Op);
1143 else if (i < Index)
1144 BeforeOps.push_back(Op);
1145 else if (i > Index)
1146 AfterOps.push_back(Op);
1147 }
1148 
1149 // Emit the load instruction.
1150 if (UnfoldLoad) {
1151 TII.loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
1152 if (UnfoldStore) {
1153 // Address operands cannot be marked isKill.
// The store emitted below reuses the same address registers, so kill
// flags on the load's address operands (indices 1-4) must be cleared.
1154 for (unsigned i = 1; i != 5; ++i) {
1155 MachineOperand &MO = NewMIs[0]->getOperand(i);
1156 if (MO.isRegister())
1157 MO.setIsKill(false);
1158 }
1159 }
1160 }
1161 
1162 // Emit the data processing instruction.
1163 MachineInstr *DataMI = new MachineInstr(TID, true);
1164 MachineInstrBuilder MIB(DataMI);
1165 
1166 if (FoldedStore)
1167 MIB.addReg(Reg, true);
1168 for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
1169 MIB = X86InstrAddOperand(MIB, BeforeOps[i]);
1170 if (FoldedLoad)
1171 MIB.addReg(Reg);
1172 for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
1173 MIB = X86InstrAddOperand(MIB, AfterOps[i]);
// Re-attach implicit operands, forcing isImp=true.
1174 for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
1175 MachineOperand &MO = ImpOps[i];
1176 MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
1177 }
1178 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
// Undo the TEST->CMP rewrite done by foldMemoryOperand's Ops=={0,1} path.
1179 unsigned NewOpc = 0;
1180 switch (DataMI->getOpcode()) {
1181 default: break;
1182 case X86::CMP64ri32:
1183 case X86::CMP32ri:
1184 case X86::CMP16ri:
1185 case X86::CMP8ri: {
1186 MachineOperand &MO0 = DataMI->getOperand(0);
1187 MachineOperand &MO1 = DataMI->getOperand(1);
1188 if (MO1.getImm() == 0) {
1189 switch (DataMI->getOpcode()) {
1190 default: break;
1191 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
1192 case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
1193 case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
1194 case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
1195 }
1196 DataMI->setInstrDescriptor(TII.get(NewOpc));
1197 MO1.ChangeToRegister(MO0.getReg(), false);
1198 }
1199 }
1200 }
1201 NewMIs.push_back(DataMI);
1202 
1203 // Emit the store instruction.
1204 if (UnfoldStore) {
1205 const TargetOperandInfo &DstTOI = TID.OpInfo[0];
1206 const TargetRegisterClass *DstRC = (DstTOI.Flags & M_LOOK_UP_PTR_REG_CLASS)
1207 ? TII.getPointerRegClass() : getRegClass(DstTOI.RegClass);
1208 TII.storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
1209 }
1210 
1211 return true;
8581212 }
8591213
8601214 static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
8951249
8961250 return Opc;
8971251 }
898
// loadRegFromStackSlot (removed side of the diff) - old X86RegisterInfo
// implementation deleted by this commit; superseded by the identical
// X86InstrInfo version above (which reads the alignment via
// RI.getStackAlignment() instead of the member StackAlign).
899 void X86RegisterInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
900 MachineBasicBlock::iterator MI,
901 unsigned DestReg, int FrameIdx,
902 const TargetRegisterClass *RC) const{
903 unsigned Opc = getLoadRegOpcode(RC, StackAlign);
904 addFrameReference(BuildMI(MBB, MI, TII.get(Opc), DestReg), FrameIdx);
905 }
906
// loadRegFromAddr (removed side of the diff) - old X86RegisterInfo
// implementation deleted by this commit; builds a detached load of DestReg
// from the Addr operand list into NewMIs.
907 void X86RegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
908 SmallVectorImpl &Addr,
909 const TargetRegisterClass *RC,
910 SmallVectorImpl &NewMIs) const {
911 unsigned Opc = getLoadRegOpcode(RC, StackAlign);
912 MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
913 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
914 MIB = X86InstrAddOperand(MIB, Addr[i]);
915 NewMIs.push_back(MIB);
916 }
917
// NOTE(review): pre-move context rendering of getCrossCopyRegClass — same
// body as the copy earlier in this diff, shown again at its old position.
918 const TargetRegisterClass *
919 X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
920 if (RC == &X86::CCRRegClass)
921 if (Is64Bit)
922 return &X86::GR64RegClass;
923 else
924 return &X86::GR32RegClass;
925 return NULL;
926 }
927
// NOTE(review): pre-move context rendering of reMaterialize — identical body
// to the copy earlier in this diff, at its old line numbers.
928 void X86RegisterInfo::reMaterialize(MachineBasicBlock &MBB,
929 MachineBasicBlock::iterator I,
930 unsigned DestReg,
931 const MachineInstr *Orig) const {
932 // MOV32r0 etc. are implemented with xor which clobbers condition code.
933 // Re-materialize them as movri instructions to avoid side effects.
934 switch (Orig->getOpcode()) {
935 case X86::MOV8r0:
936 BuildMI(MBB, I, TII.get(X86::MOV8ri), DestReg).addImm(0);
937 break;
938 case X86::MOV16r0:
939 BuildMI(MBB, I, TII.get(X86::MOV16ri), DestReg).addImm(0);
940 break;
941 case X86::MOV32r0:
942 BuildMI(MBB, I, TII.get(X86::MOV32ri), DestReg).addImm(0);
943 break;
944 case X86::MOV64r0:
945 BuildMI(MBB, I, TII.get(X86::MOV64ri32), DestReg).addImm(0);
946 break;
947 default: {
948 MachineInstr *MI = Orig->clone();
949 MI->getOperand(0).setReg(DestReg);
950 MBB.insert(I, MI);
951 break;
952 }
953 }
954 }
955
// NOTE(review): pre-move context rendering of FuseTwoAddrInst — identical
// body to the copy earlier in this diff.
956 static MachineInstr *FuseTwoAddrInst(unsigned Opcode,
957 SmallVector &MOs,
958 MachineInstr *MI, const TargetInstrInfo &TII) {
959 // Create the base instruction with the memory operand as the first part.
960 MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
961 MachineInstrBuilder MIB(NewMI);
962 unsigned NumAddrOps = MOs.size();
963 for (unsigned i = 0; i != NumAddrOps; ++i)
964 MIB = X86InstrAddOperand(MIB, MOs[i]);
965 if (NumAddrOps < 4) // FrameIndex only
966 MIB.addImm(1).addReg(0).addImm(0);
967 
968 // Loop over the rest of the ri operands, converting them over.
969 unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
970 for (unsigned i = 0; i != NumOps; ++i) {
971 MachineOperand &MO = MI->getOperand(i+2);
972 MIB = X86InstrAddOperand(MIB, MO);
973 }
974 for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
975 MachineOperand &MO = MI->getOperand(i);
976 MIB = X86InstrAddOperand(MIB, MO);
977 }
978 return MIB;
979 }
980
// NOTE(review): pre-move context rendering of FuseInst — identical body to
// the copy earlier in this diff.
981 static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
982 SmallVector &MOs,
983 MachineInstr *MI, const TargetInstrInfo &TII) {
984 MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
985 MachineInstrBuilder MIB(NewMI);
986 
987 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
988 MachineOperand &MO = MI->getOperand(i);
989 if (i == OpNo) {
990 assert(MO.isRegister() && "Expected to fold into reg operand!");
991 unsigned NumAddrOps = MOs.size();
992 for (unsigned i = 0; i != NumAddrOps; ++i)
993 MIB = X86InstrAddOperand(MIB, MOs[i]);
994 if (NumAddrOps < 4) // FrameIndex only
995 MIB.addImm(1).addReg(0).addImm(0);
996 } else {
997 MIB = X86InstrAddOperand(MIB, MO);
998 }
999 }
1000 return MIB;
1001 }
1002
// NOTE(review): pre-move context rendering of MakeM0Inst — identical body to
// the copy earlier in this diff.
1003 static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
1004 SmallVector &MOs,
1005 MachineInstr *MI) {
1006 MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
1007 
1008 unsigned NumAddrOps = MOs.size();
1009 for (unsigned i = 0; i != NumAddrOps; ++i)
1010 MIB = X86InstrAddOperand(MIB, MOs[i]);
1011 if (NumAddrOps < 4) // FrameIndex only
1012 MIB.addImm(1).addReg(0).addImm(0);
1013 return MIB.addImm(0);
1014 }
1015
// NOTE(review): pre-move context rendering of the foldMemoryOperand worker —
// identical body to the copy earlier in this diff; see that copy for the
// full commentary on the two-address / MOVxr0 / table-dispatch cases.
1016 MachineInstr*
1017 X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
1018 SmallVector &MOs) const {
1019 const DenseMap *OpcodeTablePtr = NULL;
1020 bool isTwoAddrFold = false;
1021 unsigned NumOps = TII.getNumOperands(MI->getOpcode());
1022 bool isTwoAddr = NumOps > 1 &&
1023 MI->getInstrDescriptor()->getOperandConstraint(1, TOI::TIED_TO) != -1;
1024 
1025 MachineInstr *NewMI = NULL;
1026 // Folding a memory location into the two-address part of a two-address
1027 // instruction is different than folding it other places. It requires
1028 // replacing the *two* registers with the memory location.
1029 if (isTwoAddr && NumOps >= 2 && i < 2 &&
1030 MI->getOperand(0).isRegister() &&
1031 MI->getOperand(1).isRegister() &&
1032 MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
1033 OpcodeTablePtr = &RegOp2MemOpTable2Addr;
1034 isTwoAddrFold = true;
1035 } else if (i == 0) { // If operand 0
1036 if (MI->getOpcode() == X86::MOV16r0)
1037 NewMI = MakeM0Inst(TII, X86::MOV16mi, MOs, MI);
1038 else if (MI->getOpcode() == X86::MOV32r0)
1039 NewMI = MakeM0Inst(TII, X86::MOV32mi, MOs, MI);
1040 else if (MI->getOpcode() == X86::MOV64r0)
1041 NewMI = MakeM0Inst(TII, X86::MOV64mi32, MOs, MI);
1042 else if (MI->getOpcode() == X86::MOV8r0)
1043 NewMI = MakeM0Inst(TII, X86::MOV8mi, MOs, MI);
1044 if (NewMI) {
1045 NewMI->copyKillDeadInfo(MI);
1046 return NewMI;
1047 }
1048 
1049 OpcodeTablePtr = &RegOp2MemOpTable0;
1050 } else if (i == 1) {
1051 OpcodeTablePtr = &RegOp2MemOpTable1;
1052 } else if (i == 2) {
1053 OpcodeTablePtr = &RegOp2MemOpTable2;
1054 }
1055 
1056 // If table selected...
1057 if (OpcodeTablePtr) {
1058 // Find the Opcode to fuse
1059 DenseMap::iterator I =
1060 OpcodeTablePtr->find((unsigned*)MI->getOpcode());
1061 if (I != OpcodeTablePtr->end()) {
1062 if (isTwoAddrFold)
1063 NewMI = FuseTwoAddrInst(I->second, MOs, MI, TII);
1064 else
1065 NewMI = FuseInst(I->second, i, MOs, MI, TII);
1066 NewMI->copyKillDeadInfo(MI);
1067 return NewMI;
1068 }
1069 }
1070 
1071 // No fusion
1072 if (PrintFailedFusing)
1073 cerr << "We failed to fuse ("
1074 << ((i == 1) ? "r" : "s") << "): " << *MI;
1075 return NULL;
1076 }
1077
1078
// NOTE(review): pre-move context rendering of the FrameIndex-form
// foldMemoryOperand — identical body to the copy earlier in this diff.
1079 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1080 SmallVectorImpl &Ops,
1081 int FrameIndex) const {
1082 // Check switch flag
1083 if (NoFusing) return NULL;
1084 
1085 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1086 unsigned NewOpc = 0;
1087 switch (MI->getOpcode()) {
1088 default: return NULL;
1089 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1090 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1091 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1092 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1093 }
1094 // Change to CMPXXri r, 0 first.
1095 MI->setInstrDescriptor(TII.get(NewOpc));
1096 MI->getOperand(1).ChangeToImmediate(0);
1097 } else if (Ops.size() != 1)
1098 return NULL;
1099 
1100 SmallVector MOs;
1101 MOs.push_back(MachineOperand::CreateFI(FrameIndex));
1102 return foldMemoryOperand(MI, Ops[0], MOs);
1103 }
1104
1105 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1106 SmallVectorImpl &Ops,
1107 MachineInstr *LoadMI) const {
1108 // Check switch flag
1109 if (NoFusing) return NULL;
1110
1111