llvm.org GIT mirror: llvm / aee4af6
Remove redundant foldMemoryOperand variants and other code clean up.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44517 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng
19 changed file(s) with 176 addition(s) and 262 deletion(s).
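
At a glance, the change collapses the single-operand and multi-operand folding entry points into one that takes a list of operand indices. A minimal caller-side sketch of the new pattern (the surrounding setup is illustrative; the names mirror those used in the hunks below):

    // Before: MachineInstr *FMI = MRI->foldMemoryOperand(MI, OpNum, FrameIndex);
    // After: collect every operand index of MI that refers to the spilled register
    // and hand the whole set to the single remaining entry point.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(OpNum);
    if (MachineInstr *FMI = MRI->foldMemoryOperand(MI, Ops, FrameIndex)) {
      // Folding succeeded; the caller is responsible for swapping FMI in for MI.
    }
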
274274 /// returns true.
275275 bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
276276 MachineInstr *DefMI, unsigned InstrIdx,
277 unsigned OpIdx,
278 SmallVector<unsigned, 2> &UseOps,
277 SmallVector<unsigned, 2> &Ops,
279278 bool isSS, int Slot, unsigned Reg);
280279
281280 /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
532532 const MachineInstr *Orig) const = 0;
533533
534534 /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
535 /// slot into the specified machine instruction for the specified operand. If
536 /// this is possible, a new instruction is returned with the specified operand
537 /// folded, otherwise NULL is returned. The client is responsible for removing
538 /// the old instruction and adding the new one in the instruction stream
535 /// slot into the specified machine instruction for the specified operand(s).
536 /// If this is possible, a new instruction is returned with the specified
537 /// operand folded, otherwise NULL is returned. The client is responsible for
538 /// removing the old instruction and adding the new one in the instruction
539 /// stream.
539540 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
540 unsigned OpNum,
541 int FrameIndex) const {
542 return 0;
543 }
544
545 /// foldMemoryOperand - Same as previous except it tries to fold instruction
546 /// with multiple uses of the same register.
547 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
548 SmallVectorImpl<unsigned> &UseOps,
541 SmallVectorImpl<unsigned> &Ops,
549542 int FrameIndex) const {
550543 return 0;
551544 }
554547 /// of any load and store from / to any address, not just from a specific
555548 /// stack slot.
556549 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
557 unsigned OpNum,
558 MachineInstr* LoadMI) const {
559 return 0;
560 }
561
562 /// foldMemoryOperand - Same as previous except it tries to fold instruction
563 /// with multiple uses of the same register.
564 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
565 SmallVectorImpl<unsigned> &UseOps,
550 SmallVectorImpl<unsigned> &Ops,
566551 MachineInstr* LoadMI) const {
567552 return 0;
568553 }
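
Targets that can still only fold a single operand express that by checking the size of the index list, which is the pattern the per-target hunks below follow. A hedged sketch of such an override (the target name is a placeholder, not part of the commit):

    MachineInstr *FooRegisterInfo::foldMemoryOperand(MachineInstr *MI,
                                                     SmallVectorImpl<unsigned> &Ops,
                                                     int FrameIndex) const {
      if (Ops.size() != 1) return NULL;  // this target folds one operand at a time
      unsigned OpNum = Ops[0];
      // ... existing single-operand folding logic keyed off OpNum ...
      return NULL;
    }
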
642642 /// returns true.
643643 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
644644 VirtRegMap &vrm, MachineInstr *DefMI,
645 unsigned InstrIdx, unsigned OpIdx,
646 SmallVector<unsigned, 2> &UseOps,
645 unsigned InstrIdx,
646 SmallVector<unsigned, 2> &Ops,
647647 bool isSS, int Slot, unsigned Reg) {
648 // FIXME: fold subreg use
649 if (MI->getOperand(OpIdx).getSubReg())
650 return false;
651
652 MachineInstr *fmi = NULL;
653
654 if (UseOps.size() < 2)
655 fmi = isSS ? mri_->foldMemoryOperand(MI, OpIdx, Slot)
656 : mri_->foldMemoryOperand(MI, OpIdx, DefMI);
657 else {
658 if (OpIdx != UseOps[0])
659 // Must be two-address instruction + one more use. Not going to fold.
648 unsigned MRInfo = 0;
649 const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
650 SmallVector<unsigned, 2> FoldOps;
651 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
652 unsigned OpIdx = Ops[i];
653 // FIXME: fold subreg use.
654 if (MI->getOperand(OpIdx).getSubReg())
660655 return false;
661 // It may be possible to fold load when there are multiple uses.
662 // e.g. On x86, TEST32rr r, r -> CMP32rm [mem], 0
663 fmi = isSS ? mri_->foldMemoryOperand(MI, UseOps, Slot)
664 : mri_->foldMemoryOperand(MI, UseOps, DefMI);
665 }
666
656 if (MI->getOperand(OpIdx).isDef())
657 MRInfo |= (unsigned)VirtRegMap::isMod;
658 else {
659 // Filter out two-address use operand(s).
660 if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
661 MRInfo = VirtRegMap::isModRef;
662 continue;
663 }
664 MRInfo |= (unsigned)VirtRegMap::isRef;
665 }
666 FoldOps.push_back(OpIdx);
667 }
668
669 MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
670 : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
667671 if (fmi) {
668672 // Attempt to fold the memory reference into the instruction. If
669673 // we can do this, we don't need to insert spill code.
673677 LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
674678 MachineBasicBlock &MBB = *MI->getParent();
675679 if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
676 vrm.virtFolded(Reg, MI, OpIdx, fmi);
680 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
677681 vrm.transferSpillPts(MI, fmi);
678682 vrm.transferRestorePts(MI, fmi);
679683 mi2iMap_.erase(MI);
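
The loop above also moves the mod/ref classification that VirtRegMap::virtFolded used to rederive from a single operand index into the caller: each folded operand contributes to a mask that is then handed to virtFolded. A condensed recap of the rule applied per operand (ModRef values as defined by VirtRegMap; no new behavior implied):

    if (MI->getOperand(OpIdx).isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;            // folded def implies a store
    else if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)
      MRInfo = VirtRegMap::isModRef;                    // tied use of a two-address def;
                                                        // not added to FoldOps, the tied
                                                        // pair is folded as one operand
    else
      MRInfo |= (unsigned)VirtRegMap::isRef;            // folded plain use implies a load
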
774778
775779 HasUse = mop.isUse();
776780 HasDef = mop.isDef();
777 SmallVector<unsigned, 2> UseOps;
778 if (HasUse)
779 UseOps.push_back(i);
780 std::vector<unsigned> UpdateOps;
781 SmallVector<unsigned, 2> Ops;
782 Ops.push_back(i);
781783 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
782 if (!MI->getOperand(j).isRegister())
784 const MachineOperand &MOj = MI->getOperand(j);
785 if (!MOj.isRegister())
783786 continue;
784 unsigned RegJ = MI->getOperand(j).getReg();
787 unsigned RegJ = MOj.getReg();
785788 if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
786789 continue;
787790 if (RegJ == RegI) {
788 UpdateOps.push_back(j);
789 if (MI->getOperand(j).isUse())
790 UseOps.push_back(j);
791 HasUse |= MI->getOperand(j).isUse();
792 HasDef |= MI->getOperand(j).isDef();
791 Ops.push_back(j);
792 HasUse |= MOj.isUse();
793 HasDef |= MOj.isDef();
793794 }
794795 }
795796
796797 if (TryFold &&
797 tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i,
798 UseOps, FoldSS, FoldSlot, Reg)) {
798 tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
799 Ops, FoldSS, FoldSlot, Reg)) {
799800 // Folding the load/store can completely change the instruction in
800801 // unpredictable ways, rescan it from the beginning.
801802 HasUse = false;
813814 mop.setReg(NewVReg);
814815
815816 // Reuse NewVReg for other reads.
816 for (unsigned j = 0, e = UpdateOps.size(); j != e; ++j)
817 MI->getOperand(UpdateOps[j]).setReg(NewVReg);
817 for (unsigned j = 0, e = Ops.size(); j != e; ++j)
818 MI->getOperand(Ops[j]).setReg(NewVReg);
818819
819820 if (CreatedNewVReg) {
820821 if (DefIsReMat) {
12251226 if (!TrySplit)
12261227 return NewLIs;
12271228
1228 SmallVector<unsigned, 2> UseOps;
1229 SmallVector<unsigned, 2> Ops;
12291230 if (NeedStackSlot) {
12301231 int Id = SpillMBBs.find_first();
12311232 while (Id != -1) {
12351236 unsigned VReg = spills[i].vreg;
12361237 bool isReMat = vrm.isReMaterialized(VReg);
12371238 MachineInstr *MI = getInstructionFromIndex(index);
1238 int OpIdx = -1;
1239 UseOps.clear();
1239 bool CanFold = false;
1240 bool FoundUse = false;
1241 Ops.clear();
12401242 if (spills[i].canFold) {
1243 CanFold = true;
12411244 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
12421245 MachineOperand &MO = MI->getOperand(j);
12431246 if (!MO.isRegister() || MO.getReg() != VReg)
12441247 continue;
1245 if (MO.isDef()) {
1246 OpIdx = (int)j;
1248
1249 Ops.push_back(j);
1250 if (MO.isDef())
12471251 continue;
1248 }
1249 // Can't fold if it's two-address code and the use isn't the
1250 // first and only use.
1251 if (isReMat ||
1252 (UseOps.empty() && !alsoFoldARestore(Id, index, VReg,
1253 RestoreMBBs, RestoreIdxes))) {
1254 OpIdx = -1;
1252 if (isReMat ||
1253 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1254 RestoreMBBs, RestoreIdxes))) {
1255 // MI has two-address uses of the same register. If the use
1256 // isn't the first and only use in the BB, then we can't fold
1257 // it. FIXME: Move this to rewriteInstructionsForSpills.
1258 CanFold = false;
12551259 break;
12561260 }
1257 UseOps.push_back(j);
1261 FoundUse = true;
12581262 }
12591263 }
12601264 // Fold the store into the def if possible.
12611265 bool Folded = false;
1262 if (OpIdx != -1) {
1263 if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
1264 true, Slot, VReg)) {
1265 if (!UseOps.empty())
1266 // Folded a two-address instruction, do not issue a load.
1266 if (CanFold && !Ops.empty()) {
1267 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1268 Folded = true;
1269 if (FoundUse > 0)
1270 // Also folded uses, do not issue a load.
12671271 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1268 Folded = true;
12691272 }
12701273 }
12711274
1272 // Else tell the spiller to issue a store for us.
1275 // Else tell the spiller to issue a spill.
12731276 if (!Folded)
12741277 vrm.addSpillPoint(VReg, MI);
12751278 }
12861289 continue;
12871290 unsigned VReg = restores[i].vreg;
12881291 MachineInstr *MI = getInstructionFromIndex(index);
1289 int OpIdx = -1;
1290 UseOps.clear();
1292 bool CanFold = false;
1293 Ops.clear();
12911294 if (restores[i].canFold) {
1295 CanFold = true;
12921296 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
12931297 MachineOperand &MO = MI->getOperand(j);
12941298 if (!MO.isRegister() || MO.getReg() != VReg)
12951299 continue;
1300
12961301 if (MO.isDef()) {
1297 // Can't fold if it's two-address code and it hasn't already
1298 // been folded.
1299 OpIdx = -1;
1302 // If this restore were to be folded, it would have been folded
1303 // already.
1304 CanFold = false;
13001305 break;
13011306 }
1302 if (UseOps.empty())
1303 // Use the first use index.
1304 OpIdx = (int)j;
1305 UseOps.push_back(j);
1307 Ops.push_back(j);
13061308 }
13071309 }
13081310
13091311 // Fold the load into the use if possible.
13101312 bool Folded = false;
1311 if (OpIdx != -1) {
1312 if (vrm.isReMaterialized(VReg)) {
1313 if (CanFold && !Ops.empty()) {
1314 if (!vrm.isReMaterialized(VReg))
1315 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1316 else {
13131317 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
13141318 int LdSlot = 0;
13151319 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
13161320 // If the rematerializable def is a load, also try to fold it.
13171321 if (isLoadSS ||
13181322 (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
1319 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
1320 UseOps, isLoadSS, LdSlot, VReg);
1321 } else
1322 Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
1323 true, Slot, VReg);
1323 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1324 Ops, isLoadSS, LdSlot, VReg);
1325 }
13241326 }
13251327 // If folding is not possible / failed, then tell the spiller to issue a
13261328 // load / rematerialization for us.
519519 assignVirtToPhysReg(VirtReg, PhysReg);
520520 } else { // no free registers available.
521521 // try to fold the spill into the instruction
522 if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)) {
522 SmallVector<unsigned, 2> Ops;
523 Ops.push_back(OpNum);
524 if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
523525 ++NumFolded;
524526 // Since we changed the address of MI, make sure to update live variables
525527 // to know that the new instruction has the properties of the old one.
472472 assignVirtToPhysReg(VirtReg, PhysReg);
473473 } else { // No registers available.
474474 // If we can fold this spill into this instruction, do so now.
475 if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)){
475 SmallVector<unsigned, 2> Ops;
476 Ops.push_back(OpNum);
477 if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
476478 ++NumFolded;
477479 // Since we changed the address of MI, make sure to update live variables
478480 // to know that the new instruction has the properties of the old one.
114114 }
115115
116116 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
117 unsigned OpNo, MachineInstr *NewMI) {
117 MachineInstr *NewMI, ModRef MRInfo) {
118118 // Move previous memory references folded to new instruction.
119119 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
120120 for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
121121 E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
122122 MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
123123 MI2VirtMap.erase(I++);
124 }
125
126 ModRef MRInfo;
127 const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
128 if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
129 TID->findTiedToSrcOperand(OpNo) != -1) {
130 // Folded a two-address operand.
131 MRInfo = isModRef;
132 } else if (OldMI->getOperand(OpNo).isDef()) {
133 MRInfo = isMod;
134 } else {
135 MRInfo = isRef;
136124 }
137125
138126 // add new memory reference
829817 NewMIs.clear();
830818 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
831819 assert(Idx != -1);
832 MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
820 SmallVector<unsigned, 2> Ops;
821 Ops.push_back(Idx);
822 MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Ops, SS);
833823 if (FoldedMI) {
834824 if (!VRM.hasPhys(UnfoldVR))
835825 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
279279 }
280280
281281 /// @brief Updates information about the specified virtual register's value
282 /// folded into newMI machine instruction. The OpNum argument indicates the
283 /// operand number of OldMI that is folded.
284 void virtFolded(unsigned VirtReg, MachineInstr *OldMI, unsigned OpNum,
285 MachineInstr *NewMI);
282 /// folded into newMI machine instruction.
283 void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
284 ModRef MRInfo);
286285
287286 /// @brief Updates information about the specified virtual register's value
288287 /// folded into the specified machine instruction.
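
The caller now supplies that mod/ref answer directly instead of virtFolded recomputing it from an operand number. A one-line sketch of the updated call, using the mask accumulated while the fold operands were collected (MRInfo as in tryFoldMemoryOperand above):

    // OldMI was folded into NewMI; record what kind of memory access the fold implies.
    vrm.virtFolded(Reg, OldMI, NewMI, (VirtRegMap::ModRef)MRInfo);
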
346346 }
347347
348348 MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
349 unsigned OpNum, int FI) const {
349 SmallVectorImpl<unsigned> &Ops,
350 int FI) const {
351 if (Ops.size() != 1) return NULL;
352
353 unsigned OpNum = Ops[0];
350354 unsigned Opc = MI->getOpcode();
351355 MachineInstr *NewMI = NULL;
352356 switch (Opc) {
7373 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
7474 unsigned DestReg, const MachineInstr *Orig) const;
7575
76 MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
76 MachineInstr* foldMemoryOperand(MachineInstr* MI,
77 SmallVectorImpl<unsigned> &Ops,
7778 int FrameIndex) const;
7879
7980 MachineInstr* foldMemoryOperand(MachineInstr* MI,
80 SmallVectorImpl<unsigned> &UseOps,
81 int FrameIndex) const {
82 return 0;
83 }
84
85 MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
86 MachineInstr* LoadMI) const {
87 return 0;
88 }
89
90 MachineInstr* foldMemoryOperand(MachineInstr* MI,
91 SmallVectorImpl<unsigned> &UseOps,
81 SmallVectorImpl<unsigned> &Ops,
9282 MachineInstr* LoadMI) const {
9383 return 0;
9484 }
152152 }
153153
154154 MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
155 unsigned OpNum,
155 SmallVectorImpl<unsigned> &Ops,
156156 int FrameIndex) const {
157 if (Ops.size() != 1) return NULL;
158
157159 // Make sure this is a reg-reg copy.
158160 unsigned Opc = MI->getOpcode();
159161
165167 case Alpha::CPYSS:
166168 case Alpha::CPYST:
167169 if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
168 if (OpNum == 0) { // move -> store
170 if (Ops[0] == 0) { // move -> store
169171 unsigned InReg = MI->getOperand(1).getReg();
170172 Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
171173 ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
4747 const TargetRegisterClass *RC,
4848 SmallVectorImpl &NewMIs) const;
4949
50 MachineInstr* foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
50 MachineInstr* foldMemoryOperand(MachineInstr* MI,
51 SmallVectorImpl<unsigned> &Ops,
5152 int FrameIndex) const;
5253
5354 MachineInstr* foldMemoryOperand(MachineInstr* MI,
54 SmallVectorImpl<unsigned> &UseOps,
55 int FrameIndex) const {
56 return 0;
57 }
58
59 MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
60 MachineInstr* LoadMI) const {
61 return 0;
62 }
63
64 MachineInstr* foldMemoryOperand(MachineInstr* MI,
65 SmallVectorImpl<unsigned> &UseOps,
55 SmallVectorImpl<unsigned> &Ops,
6656 MachineInstr* LoadMI) const {
6757 return 0;
6858 }
175175 }
176176
177177 MachineInstr *MipsRegisterInfo::
178 foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
179 {
178 foldMemoryOperand(MachineInstr* MI,
179 SmallVectorImpl<unsigned> &Ops, int FI) const
180 {
181 if (Ops.size() != 1) return NULL;
182
180183 MachineInstr *NewMI = NULL;
181184
182185 switch (MI->getOpcode())
187190 (MI->getOperand(1).getReg() == Mips::ZERO) &&
188191 (MI->getOperand(2).isRegister()))
189192 {
190 if (OpNum == 0) // COPY -> STORE
193 if (Ops[0] == 0) // COPY -> STORE
191194 NewMI = BuildMI(TII.get(Mips::SW)).addFrameIndex(FI)
192195 .addImm(0).addReg(MI->getOperand(2).getReg());
193 else // COPY -> LOAD
196 else // COPY -> LOAD
194197 NewMI = BuildMI(TII.get(Mips::LW), MI->getOperand(0)
195198 .getReg()).addImm(0).addFrameIndex(FI);
196199 }
5454 void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
5555 unsigned DestReg, const MachineInstr *Orig) const;
5656
57 MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
57 MachineInstr* foldMemoryOperand(MachineInstr* MI,
58 SmallVectorImpl<unsigned> &Ops,
5859 int FrameIndex) const;
5960
6061 MachineInstr* foldMemoryOperand(MachineInstr* MI,
61 SmallVectorImpl<unsigned> &UseOps,
62 int FrameIndex) const {
63 return 0;
64 }
65
66 MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
67 MachineInstr* LoadMI) const {
68 return 0;
69 }
70
71 MachineInstr* foldMemoryOperand(MachineInstr* MI,
72 SmallVectorImpl<unsigned> &UseOps,
62 SmallVectorImpl<unsigned> &Ops,
7363 MachineInstr* LoadMI) const {
7464 return 0;
7565 }
554554 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
555555 /// copy instructions, turning them into load/store instructions.
556556 MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
557 unsigned OpNum,
558 int FrameIndex) const {
557 SmallVectorImpl<unsigned> &Ops,
558 int FrameIndex) const {
559 if (Ops.size() != 1) return NULL;
560
559561 // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
560562 // it takes more than one instruction to store it.
561563 unsigned Opc = MI->getOpcode();
564 unsigned OpNum = Ops[0];
562565
563566 MachineInstr *NewMI = NULL;
564567 if ((Opc == PPC::OR &&
6464
6565 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
6666 /// copy instructions, turning them into load/store instructions.
67 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
67 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
68 SmallVectorImpl<unsigned> &Ops,
6869 int FrameIndex) const;
69
70 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
71 SmallVectorImpl<unsigned> &UseOps,
72 int FrameIndex) const {
73 return 0;
74 }
75
76 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
77 MachineInstr* LoadMI) const {
78 return 0;
79 }
8070
8171 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
82 SmallVectorImpl<unsigned> &UseOps,
72 SmallVectorImpl<unsigned> &Ops,
8373 MachineInstr* LoadMI) const {
8474 return 0;
8575 }
147147 }
148148
149149 MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
150 unsigned OpNum,
151 int FI) const {
150 SmallVectorImpl<unsigned> &Ops,
151 int FI) const {
152 if (Ops.size() != 1) return NULL;
153
154 unsigned OpNum = Ops[0];
152155 bool isFloat = false;
153156 MachineInstr *NewMI = NULL;
154157 switch (MI->getOpcode()) {
5858 unsigned DestReg, const MachineInstr *Orig) const;
5959
6060 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
61 unsigned OpNum,
61 SmallVectorImpl<unsigned> &Ops,
6262 int FrameIndex) const;
6363
6464 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
65 SmallVectorImpl<unsigned> &UseOps,
66 int FrameIndex) const {
67 return 0;
68 }
69
70 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
71 unsigned OpNum,
72 MachineInstr* LoadMI) const {
73 return 0;
74 }
75
76 virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
77 SmallVectorImpl<unsigned> &UseOps,
65 SmallVectorImpl<unsigned> &Ops,
7866 MachineInstr* LoadMI) const {
7967 return 0;
8068 }
11391139 }
11401140
11411141
1142 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
1143 int FrameIndex) const {
1144 // Check switch flag
1145 if (NoFusing) return NULL;
1146 SmallVector<MachineOperand, 4> MOs;
1147 MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
1148 return foldMemoryOperand(MI, OpNum, MOs);
1149 }
1150
11511142 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1152 SmallVectorImpl<unsigned> &UseOps,
1143 SmallVectorImpl<unsigned> &Ops,
11531144 int FrameIndex) const {
11541145 // Check switch flag
11551146 if (NoFusing) return NULL;
11561147
1157 if (UseOps.size() == 1)
1158 return foldMemoryOperand(MI, UseOps[0], FrameIndex);
1159 else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
1148 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1149 unsigned NewOpc = 0;
1150 switch (MI->getOpcode()) {
1151 default: return NULL;
1152 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1153 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1154 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1155 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1156 }
1157 // Change to CMPXXri r, 0 first.
1158 MI->setInstrDescriptor(TII.get(NewOpc));
1159 MI->getOperand(1).ChangeToImmediate(0);
1160 } else if (Ops.size() != 1)
11601161 return NULL;
11611162
1162 unsigned NewOpc = 0;
1163 switch (MI->getOpcode()) {
1164 default: return NULL;
1165 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1166 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1167 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1168 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1169 }
1170 // Change to CMPXXri r, 0 first.
1171 MI->setInstrDescriptor(TII.get(NewOpc));
1172 MI->getOperand(1).ChangeToImmediate(0);
1173 return foldMemoryOperand(MI, 0, FrameIndex);
1174 }
1175
1176 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
1163 SmallVector<MachineOperand, 4> MOs;
1164 MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
1165 return foldMemoryOperand(MI, Ops[0], MOs);
1166 }
1167
1168 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1169 SmallVectorImpl<unsigned> &Ops,
11771170 MachineInstr *LoadMI) const {
11781171 // Check switch flag
11791172 if (NoFusing) return NULL;
1173
1174 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1175 unsigned NewOpc = 0;
1176 switch (MI->getOpcode()) {
1177 default: return NULL;
1178 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1179 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1180 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1181 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1182 }
1183 // Change to CMPXXri r, 0 first.
1184 MI->setInstrDescriptor(TII.get(NewOpc));
1185 MI->getOperand(1).ChangeToImmediate(0);
1186 } else if (Ops.size() != 1)
1187 return NULL;
1188
11801189 SmallVector<MachineOperand, 4> MOs;
11811190 unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
11821191 for (unsigned i = NumOps - 4; i != NumOps; ++i)
11831192 MOs.push_back(LoadMI->getOperand(i));
1184 return foldMemoryOperand(MI, OpNum, MOs);
1185 }
1186
1187 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
1188 SmallVectorImpl<unsigned> &UseOps,
1189 MachineInstr *LoadMI) const {
1190 // Check switch flag
1191 if (NoFusing) return NULL;
1192
1193 if (UseOps.size() == 1)
1194 return foldMemoryOperand(MI, UseOps[0], LoadMI);
1195 else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
1196 return NULL;
1197 unsigned NewOpc = 0;
1198 switch (MI->getOpcode()) {
1199 default: return NULL;
1200 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
1201 case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
1202 case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
1203 case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
1204 }
1205 // Change to CMPXXri r, 0 first.
1206 MI->setInstrDescriptor(TII.get(NewOpc));
1207 MI->getOperand(1).ChangeToImmediate(0);
1208 return foldMemoryOperand(MI, 0, LoadMI);
1193 return foldMemoryOperand(MI, Ops[0], MOs);
12091194 }
12101195
12111196
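
The x86 TEST -> CMP rewrite is the case the multi-index interface exists for; it is the example cited in the LiveIntervals comment earlier in the diff. Condensed, what the new override does when both indices of a TESTrr name the spilled register, i.e. Ops is {0, 1} (a recap of the hunk above, not additional API):

    // test r, r  ->  cmp r, 0   (NewOpc chosen per width, e.g. X86::CMP32ri)
    MI->setInstrDescriptor(TII.get(NewOpc));
    MI->getOperand(1).ChangeToImmediate(0);
    // Operand 0 alone is then folded against the stack slot, yielding the
    // "CMP32rm [mem], 0" form the earlier comment refers to.
    return foldMemoryOperand(MI, Ops[0], MOs);
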
132132
133133 /// foldMemoryOperand - If this target supports it, fold a load or store of
134134 /// the specified stack slot into the specified machine instruction for the
135 /// specified operand. If this is possible, the target should perform the
135 /// specified operand(s). If this is possible, the target should perform the
136136 /// folding and return true, otherwise it should return false. If it folds
137137 /// the instruction, it is likely that the MachineInstruction the iterator
138138 /// references has been changed.
139139 MachineInstr* foldMemoryOperand(MachineInstr* MI,
140 unsigned OpNum,
141 int FrameIndex) const;
142
143 /// foldMemoryOperand - Same as previous except it tries to fold instruction
144 /// with multiple uses of the same register.
145 MachineInstr* foldMemoryOperand(MachineInstr* MI,
146 SmallVectorImpl<unsigned> &UseOps,
140 SmallVectorImpl<unsigned> &Ops,
147141 int FrameIndex) const;
148142
149143 /// foldMemoryOperand - Same as the previous version except it allows folding
150144 /// of any load and store from / to any address, not just from a specific
151145 /// stack slot.
152146 MachineInstr* foldMemoryOperand(MachineInstr* MI,
153 unsigned OpNum,
154 MachineInstr* LoadMI) const;
155
156 /// foldMemoryOperand - Same as the previous version except it allows folding
157 /// of any load and store from / to any address, not just from a specific
158 /// stack slot.
159 MachineInstr* foldMemoryOperand(MachineInstr* MI,
160 SmallVectorImpl<unsigned> &UseOps,
147 SmallVectorImpl<unsigned> &Ops,
161148 MachineInstr* LoadMI) const;
162149
163150 /// getOpcodeAfterMemoryFold - Returns the opcode of the would be new