llvm.org GIT mirror llvm / 600f171
RISC architectures get their memory operand folding for free. The only folding these load/store architectures can do is converting COPY into a load or store, and the target independent part of foldMemoryOperand already knows how to do that. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@108099 91177308-0d34-0410-b5e6-96231b3b80d8 Jakob Stoklund Olesen 10 years ago
16 changed file(s) with 0 addition(s) and 812 deletion(s). Raw diff Collapse all Expand all
948948 return &*MIB;
949949 }
950950
/// foldMemoryOperandImpl - Rewrite a register-to-register move whose operand
/// Ops[0] is being spilled to stack slot FI as the equivalent load or store.
/// Handles ARM/Thumb2 GPR moves (MOVr/t2MOVr), Thumb GPR moves, and the
/// VFP/NEON VMOV family.  Ops[0] == 0 means the move's definition is spilled
/// (move -> store); any other index reloads the destination (move -> load).
/// Returns the newly built, unattached instruction, or NULL if the move
/// cannot be folded.
951 MachineInstr *ARMBaseInstrInfo::
952 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
953 const SmallVectorImpl &Ops, int FI) const {
// Only a single operand can be folded at a time.
954 if (Ops.size() != 1) return NULL;
955
956 unsigned OpNum = Ops[0];
957 unsigned Opc = MI->getOpcode();
958 MachineInstr *NewMI = NULL;
// ARM/Thumb2 integer move: predicate lives in operands 2 (imm) and 3 (reg),
// and operand 4 is the optional CPSR def.
959 if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
960 // If it is updating CPSR, then it cannot be folded.
961 if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
962 return NULL;
963 unsigned Pred = MI->getOperand(2).getImm();
964 unsigned PredReg = MI->getOperand(3).getReg();
965 if (OpNum == 0) { // move -> store
966 unsigned SrcReg = MI->getOperand(1).getReg();
967 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
968 bool isKill = MI->getOperand(1).isKill();
969 bool isUndef = MI->getOperand(1).isUndef();
970 if (Opc == ARM::MOVr)
971 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
972 .addReg(SrcReg,
973 getKillRegState(isKill) | getUndefRegState(isUndef),
974 SrcSubReg)
975 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
976 else // ARM::t2MOVr
977 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
978 .addReg(SrcReg,
979 getKillRegState(isKill) | getUndefRegState(isUndef),
980 SrcSubReg)
981 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
982 } else { // move -> load
983 unsigned DstReg = MI->getOperand(0).getReg();
984 unsigned DstSubReg = MI->getOperand(0).getSubReg();
985 bool isDead = MI->getOperand(0).isDead();
986 bool isUndef = MI->getOperand(0).isUndef();
987 if (Opc == ARM::MOVr)
988 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
989 .addReg(DstReg,
990 RegState::Define |
991 getDeadRegState(isDead) |
992 getUndefRegState(isUndef), DstSubReg)
993 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
994 else // ARM::t2MOVr
995 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
996 .addReg(DstReg,
997 RegState::Define |
998 getDeadRegState(isDead) |
999 getUndefRegState(isUndef), DstSubReg)
1000 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
1001 }
// Thumb GPR moves: emitted as Thumb2 t2STRi12/t2LDRi12 with an always (AL)
// predicate and no predicate register.
1002 } else if (Opc == ARM::tMOVgpr2gpr ||
1003 Opc == ARM::tMOVtgpr2gpr ||
1004 Opc == ARM::tMOVgpr2tgpr) {
1005 if (OpNum == 0) { // move -> store
1006 unsigned SrcReg = MI->getOperand(1).getReg();
1007 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
1008 bool isKill = MI->getOperand(1).isKill();
1009 bool isUndef = MI->getOperand(1).isUndef();
1010 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
1011 .addReg(SrcReg,
1012 getKillRegState(isKill) | getUndefRegState(isUndef),
1013 SrcSubReg)
1014 .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
1015 } else { // move -> load
1016 unsigned DstReg = MI->getOperand(0).getReg();
1017 unsigned DstSubReg = MI->getOperand(0).getSubReg();
1018 bool isDead = MI->getOperand(0).isDead();
1019 bool isUndef = MI->getOperand(0).isUndef();
1020 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
1021 .addReg(DstReg,
1022 RegState::Define |
1023 getDeadRegState(isDead) |
1024 getUndefRegState(isUndef),
1025 DstSubReg)
1026 .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
1027 }
// Single-precision VFP move -> VSTRS/VLDRS, preserving the predicate.
1028 } else if (Opc == ARM::VMOVS) {
1029 unsigned Pred = MI->getOperand(2).getImm();
1030 unsigned PredReg = MI->getOperand(3).getReg();
1031 if (OpNum == 0) { // move -> store
1032 unsigned SrcReg = MI->getOperand(1).getReg();
1033 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
1034 bool isKill = MI->getOperand(1).isKill();
1035 bool isUndef = MI->getOperand(1).isUndef();
1036 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
1037 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
1038 SrcSubReg)
1039 .addFrameIndex(FI)
1040 .addImm(0).addImm(Pred).addReg(PredReg);
1041 } else { // move -> load
1042 unsigned DstReg = MI->getOperand(0).getReg();
1043 unsigned DstSubReg = MI->getOperand(0).getSubReg();
1044 bool isDead = MI->getOperand(0).isDead();
1045 bool isUndef = MI->getOperand(0).isUndef();
1046 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
1047 .addReg(DstReg,
1048 RegState::Define |
1049 getDeadRegState(isDead) |
1050 getUndefRegState(isUndef),
1051 DstSubReg)
1052 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
1053 }
// Double-precision VFP/NEON D-register move -> VSTRD/VLDRD.
1054 } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVDneon) {
1055 unsigned Pred = MI->getOperand(2).getImm();
1056 unsigned PredReg = MI->getOperand(3).getReg();
1057 if (OpNum == 0) { // move -> store
1058 unsigned SrcReg = MI->getOperand(1).getReg();
1059 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
1060 bool isKill = MI->getOperand(1).isKill();
1061 bool isUndef = MI->getOperand(1).isUndef();
1062 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
1063 .addReg(SrcReg,
1064 getKillRegState(isKill) | getUndefRegState(isUndef),
1065 SrcSubReg)
1066 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
1067 } else { // move -> load
1068 unsigned DstReg = MI->getOperand(0).getReg();
1069 unsigned DstSubReg = MI->getOperand(0).getSubReg();
1070 bool isDead = MI->getOperand(0).isDead();
1071 bool isUndef = MI->getOperand(0).isUndef();
1072 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
1073 .addReg(DstReg,
1074 RegState::Define |
1075 getDeadRegState(isDead) |
1076 getUndefRegState(isUndef),
1077 DstSubReg)
1078 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
1079 }
// 128-bit Q-register move: use the aligned VST1q/VLD1q form only when the
// stack slot is 16-byte aligned and the stack can be realigned; otherwise
// fall back to the multi-register VSTMQ/VLDMQ form.
1080 } else if (Opc == ARM::VMOVQ) {
1081 MachineFrameInfo &MFI = *MF.getFrameInfo();
1082 unsigned Pred = MI->getOperand(2).getImm();
1083 unsigned PredReg = MI->getOperand(3).getReg();
1084 if (OpNum == 0) { // move -> store
1085 unsigned SrcReg = MI->getOperand(1).getReg();
1086 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
1087 bool isKill = MI->getOperand(1).isKill();
1088 bool isUndef = MI->getOperand(1).isUndef();
1089 if (MFI.getObjectAlignment(FI) >= 16 &&
1090 getRegisterInfo().canRealignStack(MF)) {
1091 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VST1q))
1092 .addFrameIndex(FI).addImm(16)
1093 .addReg(SrcReg,
1094 getKillRegState(isKill) | getUndefRegState(isUndef),
1095 SrcSubReg)
1096 .addImm(Pred).addReg(PredReg);
1097 } else {
1098 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTMQ))
1099 .addReg(SrcReg,
1100 getKillRegState(isKill) | getUndefRegState(isUndef),
1101 SrcSubReg)
1102 .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
1103 .addImm(Pred).addReg(PredReg);
1104 }
1105 } else { // move -> load
1106 unsigned DstReg = MI->getOperand(0).getReg();
1107 unsigned DstSubReg = MI->getOperand(0).getSubReg();
1108 bool isDead = MI->getOperand(0).isDead();
1109 bool isUndef = MI->getOperand(0).isUndef();
1110 if (MFI.getObjectAlignment(FI) >= 16 &&
1111 getRegisterInfo().canRealignStack(MF)) {
1112 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLD1q))
1113 .addReg(DstReg,
1114 RegState::Define |
1115 getDeadRegState(isDead) |
1116 getUndefRegState(isUndef),
1117 DstSubReg)
1118 .addFrameIndex(FI).addImm(16).addImm(Pred).addReg(PredReg);
1119 } else {
1120 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDMQ))
1121 .addReg(DstReg,
1122 RegState::Define |
1123 getDeadRegState(isDead) |
1124 getUndefRegState(isUndef),
1125 DstSubReg)
1126 .addFrameIndex(FI).addImm(ARM_AM::getAM5Opc(ARM_AM::ia, 4))
1127 .addImm(Pred).addReg(PredReg);
1128 }
1129 }
1130 }
1131
1132 return NewMI;
1133 }
1134
/// foldMemoryOperandImpl - Variant that folds a neighboring load (LoadMI)
/// rather than a stack slot.  Not implemented for ARM; always returns null
/// so no fold is performed.
1135 MachineInstr*
1136 ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
1137 MachineInstr* MI,
1138 const SmallVectorImpl &Ops,
1139 MachineInstr* LoadMI) const {
1140 // FIXME
1141 return 0;
1142 }
1143
1144 bool
1145 ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
1146 const SmallVectorImpl &Ops) const {
1147 if (Ops.size() != 1) return false;
1148
1149 unsigned Opc = MI->getOpcode();
1150 if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
1151 // If it is updating CPSR, then it cannot be folded.
1152 return MI->getOperand(4).getReg() != ARM::CPSR ||
1153 MI->getOperand(4).isDead();
1154 } else if (Opc == ARM::tMOVgpr2gpr ||
1155 Opc == ARM::tMOVtgpr2gpr ||
1156 Opc == ARM::tMOVgpr2tgpr) {
1157 return true;
1158 } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD ||
1159 Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
1160 return true;
1161 }
1162
1163 // FIXME: VMOVQQ and VMOVQQQQ?
1164
1165 return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
1166 }
1167
1168951 /// Create a copy of a const pool value. Update CPI to the new index and return
1169952 /// the label UID.
1170953 static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
295295 const MDNode *MDPtr,
296296 DebugLoc DL) const;
297297
298 virtual bool canFoldMemoryOperand(const MachineInstr *MI,
299 const SmallVectorImpl &Ops) const;
300
301 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
302 MachineInstr* MI,
303 const SmallVectorImpl &Ops,
304 int FrameIndex) const;
305
306 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
307 MachineInstr* MI,
308 const SmallVectorImpl &Ops,
309 MachineInstr* LoadMI) const;
310
311298 virtual void reMaterialize(MachineBasicBlock &MBB,
312299 MachineBasicBlock::iterator MI,
313300 unsigned DestReg, unsigned SubIdx,
5050 .addReg(SrcReg, getKillRegState(KillSrc));
5151 assert(ARM::GPRRegClass.contains(DestReg, SrcReg) &&
5252 "Thumb1 can only copy GPR registers");
53 }
54
55 bool Thumb1InstrInfo::
56 canFoldMemoryOperand(const MachineInstr *MI,
57 const SmallVectorImpl &Ops) const {
58 if (Ops.size() != 1) return false;
59
60 unsigned OpNum = Ops[0];
61 unsigned Opc = MI->getOpcode();
62 switch (Opc) {
63 default: break;
64 case ARM::tMOVr:
65 case ARM::tMOVtgpr2gpr:
66 case ARM::tMOVgpr2tgpr:
67 case ARM::tMOVgpr2gpr: {
68 if (OpNum == 0) { // move -> store
69 unsigned SrcReg = MI->getOperand(1).getReg();
70 if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
71 !isARMLowRegister(SrcReg))
72 // tSpill cannot take a high register operand.
73 return false;
74 } else { // move -> load
75 unsigned DstReg = MI->getOperand(0).getReg();
76 if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
77 !isARMLowRegister(DstReg))
78 // tRestore cannot target a high register operand.
79 return false;
80 }
81 return true;
82 }
83 }
84
85 return false;
8653 }
8754
8855 void Thumb1InstrInfo::
213180
214181 return true;
215182 }
216
/// foldMemoryOperandImpl - Turn a Thumb1 register move whose operand Ops[0]
/// is spilled to stack slot FI into a tSpill (store) or tRestore (load).
/// Returns the new instruction, or NULL when the move cannot be folded
/// (e.g. a high physical register, which tSpill/tRestore cannot encode).
217 MachineInstr *Thumb1InstrInfo::
218 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
219 const SmallVectorImpl &Ops, int FI) const {
220 if (Ops.size() != 1) return NULL;
221
222 unsigned OpNum = Ops[0];
223 unsigned Opc = MI->getOpcode();
224 MachineInstr *NewMI = NULL;
225 switch (Opc) {
226 default: break;
227 case ARM::tMOVr:
228 case ARM::tMOVtgpr2gpr:
229 case ARM::tMOVgpr2tgpr:
230 case ARM::tMOVgpr2gpr: {
231 if (OpNum == 0) { // move -> store
232 unsigned SrcReg = MI->getOperand(1).getReg();
233 bool isKill = MI->getOperand(1).isKill();
234 if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
235 !isARMLowRegister(SrcReg))
236 // tSpill cannot take a high register operand.
237 break;
// AddDefaultPred appends the always-execute predicate operands.
238 NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
239 .addReg(SrcReg, getKillRegState(isKill))
240 .addFrameIndex(FI).addImm(0));
241 } else { // move -> load
242 unsigned DstReg = MI->getOperand(0).getReg();
243 if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
244 !isARMLowRegister(DstReg))
245 // tRestore cannot target a high register operand.
246 break;
247 bool isDead = MI->getOperand(0).isDead();
248 NewMI = AddDefaultPred(BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
249 .addReg(DstReg,
250 RegState::Define | getDeadRegState(isDead))
251 .addFrameIndex(FI).addImm(0));
252 }
253 break;
254 }
255 }
256
257 return NewMI;
258 }
6161 const TargetRegisterClass *RC,
6262 const TargetRegisterInfo *TRI) const;
6363
64 bool canFoldMemoryOperand(const MachineInstr *MI,
65 const SmallVectorImpl &Ops) const;
66
67 MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
68 MachineInstr* MI,
69 const SmallVectorImpl &Ops,
70 int FrameIndex) const;
71
// Folding against a neighboring load instruction (rather than a stack slot)
// is not supported for Thumb1; always decline by returning null.
72 MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
73 MachineInstr* MI,
74 const SmallVectorImpl &Ops,
75 MachineInstr* LoadMI) const {
76 return 0;
77 }
7864 };
7965 }
8066
214214 llvm_unreachable("Unhandled register class");
215215 }
216216
/// foldMemoryOperandImpl - Fold an Alpha register-to-register copy (BISr for
/// GPRs, CPYSS/CPYST for FP) whose operand Ops[0] is spilled to FrameIndex
/// into the matching store (STQ/STS/STT) or load (LDQ/LDS/LDT).  Returns the
/// replacement instruction, or NULL if no fold is possible.
217 MachineInstr *AlphaInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
218 MachineInstr *MI,
219 const SmallVectorImpl &Ops,
220 int FrameIndex) const {
221 if (Ops.size() != 1) return NULL;
222
223 // Make sure this is a reg-reg copy.
224 unsigned Opc = MI->getOpcode();
225
226 MachineInstr *NewMI = NULL;
227 switch(Opc) {
228 default:
229 break;
230 case Alpha::BISr:
231 case Alpha::CPYSS:
232 case Alpha::CPYST:
// These ops are only copies when both source operands name the same register.
233 if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
234 if (Ops[0] == 0) { // move -> store
235 unsigned InReg = MI->getOperand(1).getReg();
236 bool isKill = MI->getOperand(1).isKill();
237 bool isUndef = MI->getOperand(1).isUndef();
238 Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
239 ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
240 NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
241 .addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef))
242 .addFrameIndex(FrameIndex)
// F31/R31 reads as zero; used here as the base register placeholder.
243 .addReg(Alpha::F31);
244 } else { // move -> load
245 unsigned OutReg = MI->getOperand(0).getReg();
246 bool isDead = MI->getOperand(0).isDead();
247 bool isUndef = MI->getOperand(0).isUndef();
248 Opc = (Opc == Alpha::BISr) ? Alpha::LDQ :
249 ((Opc == Alpha::CPYSS) ? Alpha::LDS : Alpha::LDT);
250 NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
251 .addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
252 getUndefRegState(isUndef))
253 .addFrameIndex(FrameIndex)
254 .addReg(Alpha::F31);
255 }
256 }
257 break;
258 }
259 return NewMI;
260 }
261
262217 static unsigned AlphaRevCondCode(unsigned Opcode) {
263218 switch (Opcode) {
264219 case Alpha::BEQ: return Alpha::BNE;
6060 const TargetRegisterClass *RC,
6161 const TargetRegisterInfo *TRI) const;
6262
63 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
64 MachineInstr* MI,
65 const SmallVectorImpl &Ops,
66 int FrameIndex) const;
67
// Folding against a neighboring load instruction is not supported on Alpha;
// always decline by returning null.
68 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
69 MachineInstr* MI,
70 const SmallVectorImpl &Ops,
71 MachineInstr* LoadMI) const {
72 return 0;
73 }
74
7563 bool AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
7664 MachineBasicBlock *&FBB,
7765 SmallVectorImpl &Cond,
331331 addFrameReference(BuildMI(MBB, MI, DL, get(opc), DestReg), FrameIdx);
332332 }
333333
334 //! Return true if the specified load or store can be folded
335 bool
336 SPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
337 const SmallVectorImpl &Ops) const {
338 if (Ops.size() != 1) return false;
339
340 // Make sure this is a reg-reg copy.
341 unsigned Opc = MI->getOpcode();
342
343 switch (Opc) {
344 case SPU::ORv16i8:
345 case SPU::ORv8i16:
346 case SPU::ORv4i32:
347 case SPU::ORv2i64:
348 case SPU::ORr8:
349 case SPU::ORr16:
350 case SPU::ORr32:
351 case SPU::ORr64:
352 case SPU::ORf32:
353 case SPU::ORf64:
354 if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg())
355 return true;
356 break;
357 }
358
359 return false;
360 }
361
362 /// foldMemoryOperand - SPU, like PPC, can only fold spills into
363 /// copy instructions, turning them into load/store instructions.
364 MachineInstr *
365 SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
366 MachineInstr *MI,
367 const SmallVectorImpl &Ops,
368 int FrameIndex) const
369 {
370 if (Ops.size() != 1) return 0;
371
372 unsigned OpNum = Ops[0];
373 unsigned Opc = MI->getOpcode();
374 MachineInstr *NewMI = 0;
375
376 switch (Opc) {
377 case SPU::ORv16i8:
378 case SPU::ORv8i16:
379 case SPU::ORv4i32:
380 case SPU::ORv2i64:
381 case SPU::ORr8:
382 case SPU::ORr16:
383 case SPU::ORr32:
384 case SPU::ORr64:
385 case SPU::ORf32:
386 case SPU::ORf64:
387 if (OpNum == 0) { // move -> store
388 unsigned InReg = MI->getOperand(1).getReg();
389 bool isKill = MI->getOperand(1).isKill();
390 bool isUndef = MI->getOperand(1).isUndef();
391 if (FrameIndex < SPUFrameInfo::maxFrameOffset()) {
392 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(),
393 get(SPU::STQDr32));
394
395 MIB.addReg(InReg, getKillRegState(isKill) | getUndefRegState(isUndef));
396 NewMI = addFrameReference(MIB, FrameIndex);
397 }
398 } else { // move -> load
399 unsigned OutReg = MI->getOperand(0).getReg();
400 bool isDead = MI->getOperand(0).isDead();
401 bool isUndef = MI->getOperand(0).isUndef();
402 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(Opc));
403
404 MIB.addReg(OutReg, RegState::Define | getDeadRegState(isDead) |
405 getUndefRegState(isUndef));
406 Opc = (FrameIndex < SPUFrameInfo::maxFrameOffset())
407 ? SPU::STQDr32 : SPU::STQXr32;
408 NewMI = addFrameReference(MIB, FrameIndex);
409 break;
410 }
411 }
412
413 return NewMI;
414 }
415
416334 //! Branch analysis
417335 /*!
418336 \note This code was kiped from PPC. There may be more branch analysis for
2222 class SPUInstrInfo : public TargetInstrInfoImpl {
2323 SPUTargetMachine &TM;
2424 const SPURegisterInfo RI;
25 protected:
26 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
27 MachineInstr* MI,
28 const SmallVectorImpl &Ops,
29 int FrameIndex) const;
30
// Folding against a neighboring load instruction is not supported on SPU;
// always decline by returning null.
31 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
32 MachineInstr* MI,
33 const SmallVectorImpl &Ops,
34 MachineInstr* LoadMI) const {
35 return 0;
36 }
37
3825 public:
3926 explicit SPUInstrInfo(SPUTargetMachine &tm);
4027
7461 const TargetRegisterClass *RC,
7562 const TargetRegisterInfo *TRI) const;
7663
77 //! Return true if the specified load or store can be folded
78 virtual
79 bool canFoldMemoryOperand(const MachineInstr *MI,
80 const SmallVectorImpl &Ops) const;
81
8264 //! Reverses a branch's condition, returning false on success.
8365 virtual
8466 bool ReverseBranchCondition(SmallVectorImpl &Cond) const;
138138 .addImm(0).addFrameIndex(FI);
139139 }
140140
/// foldMemoryOperandImpl - Fold an MBlaze register copy (OR/ADD with R0,
/// which reads as zero) whose operand Ops[0] is spilled to stack slot FI
/// into an SW store or LW load.  Returns the replacement instruction, or
/// NULL if the instruction is not a recognized copy.
141 MachineInstr *MBlazeInstrInfo::
142 foldMemoryOperandImpl(MachineFunction &MF,
143 MachineInstr* MI,
144 const SmallVectorImpl &Ops, int FI) const {
145 if (Ops.size() != 1) return NULL;
146
147 MachineInstr *NewMI = NULL;
148
149 switch (MI->getOpcode()) {
150 case MBlaze::OR:
151 case MBlaze::ADD:
// OR/ADD dst, src, R0 is a copy because R0 is the hard-wired zero register.
152 if ((MI->getOperand(0).isReg()) &&
153 (MI->getOperand(2).isReg()) &&
154 (MI->getOperand(2).getReg() == MBlaze::R0) &&
155 (MI->getOperand(1).isReg())) {
156 if (Ops[0] == 0) { // COPY -> STORE
157 unsigned SrcReg = MI->getOperand(1).getReg();
158 bool isKill = MI->getOperand(1).isKill();
159 bool isUndef = MI->getOperand(1).isUndef();
// MBlaze memory instructions take the immediate offset before the frame index.
160 NewMI = BuildMI(MF, MI->getDebugLoc(), get(MBlaze::SW))
161 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
162 .addImm(0).addFrameIndex(FI);
163 } else { // COPY -> LOAD
164 unsigned DstReg = MI->getOperand(0).getReg();
165 bool isDead = MI->getOperand(0).isDead();
166 bool isUndef = MI->getOperand(0).isUndef();
167 NewMI = BuildMI(MF, MI->getDebugLoc(), get(MBlaze::LW))
168 .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
169 getUndefRegState(isUndef))
170 .addImm(0).addFrameIndex(FI);
171 }
172 }
173 break;
174 }
175
176 return NewMI;
177 }
178
179141 //===----------------------------------------------------------------------===//
180142 // Branch Analysis
181143 //===----------------------------------------------------------------------===//
215215 const TargetRegisterClass *RC,
216216 const TargetRegisterInfo *TRI) const;
217217
218 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
219 MachineInstr* MI,
220 const SmallVectorImpl &Ops,
221 int FrameIndex) const;
222
// Folding against a neighboring load instruction is not supported on MBlaze;
// always decline by returning null.
223 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
224 MachineInstr* MI,
225 const SmallVectorImpl &Ops,
226 MachineInstr* LoadMI) const {
227 return 0;
228 }
229
230218 /// Insert nop instruction when hazard condition is found
231219 virtual void insertNoop(MachineBasicBlock &MBB,
232220 MachineBasicBlock::iterator MI) const;
258258 }
259259 } else
260260 llvm_unreachable("Register class not handled!");
261 }
262
263 MachineInstr *MipsInstrInfo::
264 foldMemoryOperandImpl(MachineFunction &MF,
265 MachineInstr* MI,
266 const SmallVectorImpl &Ops, int FI) const
267 {
268 if (Ops.size() != 1) return NULL;
269
270 MachineInstr *NewMI = NULL;
271
272 switch (MI->getOpcode()) {
273 case Mips::ADDu:
274 if ((MI->getOperand(0).isReg()) &&
275 (MI->getOperand(1).isReg()) &&
276 (MI->getOperand(1).getReg() == Mips::ZERO) &&
277 (MI->getOperand(2).isReg())) {
278 if (Ops[0] == 0) { // COPY -> STORE
279 unsigned SrcReg = MI->getOperand(2).getReg();
280 bool isKill = MI->getOperand(2).isKill();
281 bool isUndef = MI->getOperand(2).isUndef();
282 NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::SW))
283 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
284 .addImm(0).addFrameIndex(FI);
285 } else { // COPY -> LOAD
286 unsigned DstReg = MI->getOperand(0).getReg();
287 bool isDead = MI->getOperand(0).isDead();
288 bool isUndef = MI->getOperand(0).isUndef();
289 NewMI = BuildMI(MF, MI->getDebugLoc(), get(Mips::LW))
290 .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
291 getUndefRegState(isUndef))
292 .addImm(0).addFrameIndex(FI);
293 }
294 }
295 break;
296 case Mips::FMOV_S32:
297 case Mips::FMOV_D32:
298 if ((MI->getOperand(0).isReg()) &&
299 (MI->getOperand(1).isReg())) {
300 const TargetRegisterClass
301 *RC = RI.getRegClass(MI->getOperand(0).getReg());
302 unsigned StoreOpc, LoadOpc;
303 bool IsMips1 = TM.getSubtarget().isMips1();
304
305 if (RC == Mips::FGR32RegisterClass) {
306 LoadOpc = Mips::LWC1; StoreOpc = Mips::SWC1;
307 } else {
308 assert(RC == Mips::AFGR64RegisterClass);
309 // Mips1 doesn't have ldc/sdc instructions.
310 if (IsMips1) break;
311 LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
312 }
313
314 if (Ops[0] == 0) { // COPY -> STORE
315 unsigned SrcReg = MI->getOperand(1).getReg();
316 bool isKill = MI->getOperand(1).isKill();
317 bool isUndef = MI->getOperand(2).isUndef();
318 NewMI = BuildMI(MF, MI->getDebugLoc(), get(StoreOpc))
319 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
320 .addImm(0).addFrameIndex(FI) ;
321 } else { // COPY -> LOAD
322 unsigned DstReg = MI->getOperand(0).getReg();
323 bool isDead = MI->getOperand(0).isDead();
324 bool isUndef = MI->getOperand(0).isUndef();
325 NewMI = BuildMI(MF, MI->getDebugLoc(), get(LoadOpc))
326 .addReg(DstReg, RegState::Define | getDeadRegState(isDead) |
327 getUndefRegState(isUndef))
328 .addImm(0).addFrameIndex(FI);
329 }
330 }
331 break;
332 }
333
334 return NewMI;
335261 }
336262
337263 //===----------------------------------------------------------------------===//
221221 const TargetRegisterClass *RC,
222222 const TargetRegisterInfo *TRI) const;
223223
224 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
225 MachineInstr* MI,
226 const SmallVectorImpl &Ops,
227 int FrameIndex) const;
228
// Folding against a neighboring load instruction is not supported on Mips;
// always decline by returning null.
229 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
230 MachineInstr* MI,
231 const SmallVectorImpl &Ops,
232 MachineInstr* LoadMI) const {
233 return 0;
234 }
235
236224 virtual
237225 bool ReverseBranchCondition(SmallVectorImpl &Cond) const;
238226
648648 return &*MIB;
649649 }
650650
651 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
652 /// copy instructions, turning them into load/store instructions.
/// Ops[0] == 0 spills the copy's definition (move -> store); any other index
/// reloads its destination (move -> load).  Returns the new instruction, or
/// NULL when this MI is not a foldable copy.
653 MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
654 MachineInstr *MI,
655 const SmallVectorImpl &Ops,
656 int FrameIndex) const {
657 if (Ops.size() != 1) return NULL;
658
659 // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
660 // it takes more than one instruction to store it.
661 unsigned Opc = MI->getOpcode();
662 unsigned OpNum = Ops[0];
663
664 MachineInstr *NewMI = NULL;
// 32-bit GPR copy (OR rd, rs, rs) -> STW / LWZ.
665 if ((Opc == PPC::OR &&
666 MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
667 if (OpNum == 0) { // move -> store
668 unsigned InReg = MI->getOperand(1).getReg();
669 bool isKill = MI->getOperand(1).isKill();
670 bool isUndef = MI->getOperand(1).isUndef();
671 NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STW))
672 .addReg(InReg,
673 getKillRegState(isKill) |
674 getUndefRegState(isUndef)),
675 FrameIndex);
676 } else { // move -> load
677 unsigned OutReg = MI->getOperand(0).getReg();
678 bool isDead = MI->getOperand(0).isDead();
679 bool isUndef = MI->getOperand(0).isUndef();
680 NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LWZ))
681 .addReg(OutReg,
682 RegState::Define |
683 getDeadRegState(isDead) |
684 getUndefRegState(isUndef)),
685 FrameIndex);
686 }
// 64-bit GPR copy (OR8) -> STD / LD.
687 } else if ((Opc == PPC::OR8 &&
688 MI->getOperand(1).getReg() == MI->getOperand(2).getReg())) {
689 if (OpNum == 0) { // move -> store
690 unsigned InReg = MI->getOperand(1).getReg();
691 bool isKill = MI->getOperand(1).isKill();
692 bool isUndef = MI->getOperand(1).isUndef();
693 NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::STD))
694 .addReg(InReg,
695 getKillRegState(isKill) |
696 getUndefRegState(isUndef)),
697 FrameIndex);
698 } else { // move -> load
699 unsigned OutReg = MI->getOperand(0).getReg();
700 bool isDead = MI->getOperand(0).isDead();
701 bool isUndef = MI->getOperand(0).isUndef();
702 NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(), get(PPC::LD))
703 .addReg(OutReg,
704 RegState::Define |
705 getDeadRegState(isDead) |
706 getUndefRegState(isUndef)),
707 FrameIndex);
708 }
// FP copy: the spilled register's class (F4RC vs F8RC) chooses STFS/STFD
// and LFS/LFD; the class is only recoverable for virtual registers.
709 } else if (Opc == PPC::FMR || Opc == PPC::FMRSD) {
710 // The register may be F4RC or F8RC, and that determines the memory op.
711 unsigned OrigReg = MI->getOperand(OpNum).getReg();
712 // We cannot tell the register class from a physreg alone.
713 if (TargetRegisterInfo::isPhysicalRegister(OrigReg))
714 return NULL;
715 const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(OrigReg);
716 const bool is64 = RC == PPC::F8RCRegisterClass;
717
718 if (OpNum == 0) { // move -> store
719 unsigned InReg = MI->getOperand(1).getReg();
720 bool isKill = MI->getOperand(1).isKill();
721 bool isUndef = MI->getOperand(1).isUndef();
722 NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
723 get(is64 ? PPC::STFD : PPC::STFS))
724 .addReg(InReg,
725 getKillRegState(isKill) |
726 getUndefRegState(isUndef)),
727 FrameIndex);
728 } else { // move -> load
729 unsigned OutReg = MI->getOperand(0).getReg();
730 bool isDead = MI->getOperand(0).isDead();
731 bool isUndef = MI->getOperand(0).isUndef();
732 NewMI = addFrameReference(BuildMI(MF, MI->getDebugLoc(),
733 get(is64 ? PPC::LFD : PPC::LFS))
734 .addReg(OutReg,
735 RegState::Define |
736 getDeadRegState(isDead) |
737 getUndefRegState(isUndef)),
738 FrameIndex);
739 }
740 }
741
742 return NewMI;
743 }
744
745 bool PPCInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
746 const SmallVectorImpl &Ops) const {
747 if (Ops.size() != 1) return false;
748
749 // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
750 // it takes more than one instruction to store it.
751 unsigned Opc = MI->getOpcode();
752
753 if ((Opc == PPC::OR &&
754 MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
755 return true;
756 else if ((Opc == PPC::OR8 &&
757 MI->getOperand(1).getReg() == MI->getOperand(2).getReg()))
758 return true;
759 else if (Opc == PPC::FMR || Opc == PPC::FMRSD)
760 return true;
761
762 return false;
763 }
764
765
766651 bool PPCInstrInfo::
767652 ReverseBranchCondition(SmallVectorImpl &Cond) const {
768653 assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
133133 const MDNode *MDPtr,
134134 DebugLoc DL) const;
135135
136 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
137 /// copy instructions, turning them into load/store instructions.
138 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
139 MachineInstr* MI,
140 const SmallVectorImpl &Ops,
141 int FrameIndex) const;
142
// Folding against a neighboring load instruction is not supported on PowerPC;
// always decline by returning null.
143 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
144 MachineInstr* MI,
145 const SmallVectorImpl &Ops,
146 MachineInstr* LoadMI) const {
147 return 0;
148 }
149
150 virtual bool canFoldMemoryOperand(const MachineInstr *MI,
151 const SmallVectorImpl &Ops) const;
152
153136 virtual
154137 bool ReverseBranchCondition(SmallVectorImpl &Cond) const;
155138
173173 llvm_unreachable("Can't load this register from stack slot");
174174 }
175175
/// foldMemoryOperandImpl - Fold a Sparc register-to-register copy (ORrr with
/// %g0, or FMOVS/FMOVD) whose operand Ops[0] is spilled to stack slot FI
/// into the equivalent store or load.  Returns the replacement instruction,
/// or NULL when no fold applies.
176 MachineInstr *SparcInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
177 MachineInstr* MI,
178 const SmallVectorImpl &Ops,
179 int FI) const {
180 if (Ops.size() != 1) return NULL;
181
182 unsigned OpNum = Ops[0];
183 bool isFloat = false;
184 MachineInstr *NewMI = NULL;
185 switch (MI->getOpcode()) {
186 case SP::ORrr:
// or %g0, %reg, %dst is the canonical GPR copy (%g0 reads as zero).
187 if (MI->getOperand(1).isReg() && MI->getOperand(1).getReg() == SP::G0&&
188 MI->getOperand(0).isReg() && MI->getOperand(2).isReg()) {
189 if (OpNum == 0) // COPY -> STORE
// NOTE(review): unlike the FP path below, this branch does not transfer the
// kill/undef flags of the copied register onto the new store - confirm
// whether that was intentional.
190 NewMI = BuildMI(MF, MI->getDebugLoc(), get(SP::STri))
191 .addFrameIndex(FI)
192 .addImm(0)
193 .addReg(MI->getOperand(2).getReg());
194 else // COPY -> LOAD
195 NewMI = BuildMI(MF, MI->getDebugLoc(), get(SP::LDri),
196 MI->getOperand(0).getReg())
197 .addFrameIndex(FI)
198 .addImm(0);
199 }
200 break;
201 case SP::FMOVS:
202 isFloat = true;
203 // FALLTHROUGH
204 case SP::FMOVD:
// isFloat selects the single-precision (STFri/LDFri) vs double-precision
// (STDFri/LDDFri) memory opcodes below.
205 if (OpNum == 0) { // COPY -> STORE
206 unsigned SrcReg = MI->getOperand(1).getReg();
207 bool isKill = MI->getOperand(1).isKill();
208 bool isUndef = MI->getOperand(1).isUndef();
209 NewMI = BuildMI(MF, MI->getDebugLoc(),
210 get(isFloat ? SP::STFri : SP::STDFri))
211 .addFrameIndex(FI)
212 .addImm(0)
213 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef));
214 } else { // COPY -> LOAD
215 unsigned DstReg = MI->getOperand(0).getReg();
216 bool isDead = MI->getOperand(0).isDead();
217 bool isUndef = MI->getOperand(0).isUndef();
218 NewMI = BuildMI(MF, MI->getDebugLoc(),
219 get(isFloat ? SP::LDFri : SP::LDDFri))
220 .addReg(DstReg, RegState::Define |
221 getDeadRegState(isDead) | getUndefRegState(isUndef))
222 .addFrameIndex(FI)
223 .addImm(0);
224 }
225 break;
226 }
227
228 return NewMI;
229 }
230
231176 unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
232177 {
233178 SparcMachineFunctionInfo *SparcFI = MF->getInfo();
8787 const TargetRegisterClass *RC,
8888 const TargetRegisterInfo *TRI) const;
8989
90 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
91 MachineInstr* MI,
92 const SmallVectorImpl &Ops,
93 int FrameIndex) const;
94
// Folding against a neighboring load instruction is not supported on Sparc;
// always decline by returning null.
95 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
96 MachineInstr* MI,
97 const SmallVectorImpl &Ops,
98 MachineInstr* LoadMI) const {
99 return 0;
100 }
101
10290 unsigned getGlobalBaseReg(MachineFunction *MF) const;
10391 };
10492