llvm.org GIT mirror llvm / commit 6f158fa

Target: Remove unused entities.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@283690 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Peter Collingbourne

17 files changed, 4 insertions(+), 181 deletions(-)
@@ -178,12 +178,6 @@
   virtual void adjustForHiPEPrologue(MachineFunction &MF,
                                      MachineBasicBlock &PrologueMBB) const {}

-  /// Adjust the prologue to add an allocation at a fixed offset from the frame
-  /// pointer.
-  virtual void
-  adjustForFrameAllocatePrologue(MachineFunction &MF,
-                                 MachineBasicBlock &PrologueMBB) const {}
-
   /// spillCalleeSavedRegisters - Issues instruction(s) to spill all callee
   /// saved registers and returns true if it isn't possible / profitable to do
   /// so by issuing a series of store instructions via
@@ -610,40 +610,6 @@
   virtual void ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                        MachineBasicBlock *NewDest) const;

-  /// Get an instruction that performs an unconditional branch to the given
-  /// symbol.
-  virtual void
-  getUnconditionalBranch(MCInst &MI,
-                         const MCSymbolRefExpr *BranchTarget) const {
-    llvm_unreachable("Target didn't implement "
-                     "TargetInstrInfo::getUnconditionalBranch!");
-  }
-
-  /// Get a machine trap instruction.
-  virtual void getTrap(MCInst &MI) const {
-    llvm_unreachable("Target didn't implement TargetInstrInfo::getTrap!");
-  }
-
-  /// Get a number of bytes that suffices to hold
-  /// either the instruction returned by getUnconditionalBranch or the
-  /// instruction returned by getTrap. This only makes sense because
-  /// getUnconditionalBranch returns a single, specific instruction. This
-  /// information is needed by the jumptable construction code, since it must
-  /// decide how many bytes to use for a jumptable entry so it can generate the
-  /// right mask.
-  ///
-  /// Note that if the jumptable instruction requires alignment, then that
-  /// alignment should be factored into this required bound so that the
-  /// resulting bound gives the right alignment for the instruction.
-  virtual unsigned getJumpInstrTableEntryBound() const {
-    // This method gets called by LLVMTargetMachine always, so it can't fail
-    // just because there happens to be no implementation for this target.
-    // Any code that tries to use a jumptable annotation without defining
-    // getUnconditionalBranch on the appropriate Target will fail anyway, and
-    // the value returned here won't matter in that case.
-    return 0;
-  }
-
   /// Return true if it's legal to split the given basic
   /// block at the specified instruction (i.e. instruction would be the start
   /// of a new basic block).
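Note: the deleted doc comment ties the entry bound to jumptable mask generation. A minimal sketch of how an emitter could turn the bound into an entry stride, assuming the power-of-two rounding that masking implies; the helper name is hypothetical, not from this commit:

#include <cstdint>

// Hypothetical helper: round the per-entry byte bound up to a power of two
// so a jumptable index can be range-checked with a simple bitmask.
static uint64_t jumpTableEntrySize(uint64_t EntryBound) {
  uint64_t Size = 1;
  while (Size < EntryBound)
    Size <<= 1;
  return Size; // e.g. the X86 bound of 5 (deleted below) rounds up to 8
}

The X86 implementations of all three removed hooks are deleted further down in this diff.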
@@ -1292,22 +1258,6 @@
                                 const MachineInstr &UseMI,
                                 unsigned UseIdx) const;

-  /// Compute and return the latency of the given data dependent def and use
-  /// when the operand indices are already known. UseMI may be \c nullptr for
-  /// an unknown use.
-  ///
-  /// FindMin may be set to get the minimum vs. expected latency. Minimum
-  /// latency is used for scheduling groups, while expected latency is for
-  /// instruction cost and critical path.
-  ///
-  /// Depending on the subtarget's itinerary properties, this may or may not
-  /// need to call getOperandLatency(). For most subtargets, we don't need
-  /// DefIdx or UseIdx to compute min latency.
-  unsigned computeOperandLatency(const InstrItineraryData *ItinData,
-                                 const MachineInstr &DefMI, unsigned DefIdx,
-                                 const MachineInstr *UseMI,
-                                 unsigned UseIdx) const;
-
   /// Compute the instruction latency of a given instruction.
   /// If the instruction has higher cost when predicated, it's returned via
   /// PredCost.
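Note: for context on the removed declaration, a sketch of the call shape it supported; the parameter list mirrors the deleted signature, and the matching definition is deleted from TargetInstrInfo.cpp further down in this diff (assumes the usual TargetInstrInfo.h includes plus <algorithm> for std::max):

// Sketch only: querying the removed API for a known def/use pair, and for
// an unknown consumer via the nullptr form the doc comment allowed.
unsigned queryLatency(const TargetInstrInfo &TII,
                      const InstrItineraryData *ItinData,
                      const MachineInstr &DefMI, unsigned DefIdx,
                      const MachineInstr &UseMI, unsigned UseIdx) {
  unsigned Known =
      TII.computeOperandLatency(ItinData, DefMI, DefIdx, &UseMI, UseIdx);
  unsigned Unknown =
      TII.computeOperandLatency(ItinData, DefMI, DefIdx, nullptr, 0);
  return std::max(Known, Unknown);
}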
@@ -189,9 +189,6 @@
   virtual MVT getVectorIdxTy(const DataLayout &DL) const {
     return getPointerTy(DL);
   }
-
-  /// Return true if the select operation is expensive for this target.
-  bool isSelectExpensive() const { return SelectIsExpensive; }

   virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
     return true;
@@ -1377,12 +1374,6 @@
     StackPointerRegisterToSaveRestore = R;
   }

-  /// Tells the code generator not to expand operations into sequences that use
-  /// the select operations if possible.
-  void setSelectIsExpensive(bool isExpensive = true) {
-    SelectIsExpensive = isExpensive;
-  }
-
   /// Tells the code generator that the target has multiple (allocatable)
   /// condition registers that can be used to store the results of comparisons
   /// for use by selects and conditional branches. With multiple condition
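Note: within this diff, the only deleted call to the setter (AMDGPU, below) passed false, and the default initialization removed from TargetLoweringBase.cpp also set false, which is what made the knob dead. What a target constructor's use of it looked like:

// Pre-removal, inside a target's TargetLowering constructor:
setSelectIsExpensive(false); // matched the default, so it had no effect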
@@ -1422,15 +1413,6 @@
   void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
     assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
     RegClassForVT[VT.SimpleTy] = RC;
-  }
-
-  /// Remove all register classes.
-  void clearRegisterClasses() {
-    std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
-  }
-
-  /// \brief Remove all operation actions.
-  void clearOperationActions() {
   }

   /// Return the largest legal super-reg register class of the register class
@@ -1760,11 +1742,6 @@
   /// In other words, unless the target performs a post-isel load combining,
   /// this information should not be provided because it will generate more
   /// loads.
-  virtual bool hasPairedLoad(Type * /*LoadedType*/,
-                             unsigned & /*RequiredAligment*/) const {
-    return false;
-  }
-
   virtual bool hasPairedLoad(EVT /*LoadedType*/,
                              unsigned & /*RequiredAligment*/) const {
     return false;
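Note: the surviving EVT overload is queried the same way the removed Type* one was. A sketch, with TLI assumed to be a TargetLoweringBase reference and the type illustrative; the parameter spelling "RequiredAligment" follows the LLVM source as-is:

unsigned RequiredAligment = 0;
if (TLI.hasPairedLoad(MVT::i64, RequiredAligment)) {
  // The target can merge adjacent loads of this type into one paired load,
  // provided the access satisfies RequiredAligment (0 = no constraint).
}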
@@ -1913,10 +1890,6 @@

 private:
   const TargetMachine &TM;
-
-  /// Tells the code generator not to expand operations into sequences that use
-  /// the select operations if possible.
-  bool SelectIsExpensive;

   /// Tells the code generator that the target has multiple (allocatable)
   /// condition registers that can be used to store the results of comparisons
@@ -119,12 +119,6 @@
   getExplicitSectionGlobal(const GlobalValue *GV, SectionKind Kind,
                            const TargetMachine &TM) const = 0;

-  /// Allow the target to completely override section assignment of a global.
-  virtual const MCSection *
-  getSpecialCasedSectionGlobals(const GlobalValue *GV, SectionKind Kind) const {
-    return nullptr;
-  }
-
   /// Return an MCExpr to use for a reference to the specified global variable
   /// from exception handling information.
   virtual const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
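Note: for reference, a hypothetical pre-removal override showing the contract the deleted doc comment describes; MySpecialSection and the ".myspecial" name are invented for illustration:

const MCSection *
getSpecialCasedSectionGlobals(const GlobalValue *GV,
                              SectionKind Kind) const override {
  // Returning a section overrides normal selection for this global;
  // returning nullptr defers to the default placement logic.
  if (GV->hasSection() && GV->getSection() == ".myspecial")
    return MySpecialSection;
  return nullptr;
}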
@@ -192,12 +192,6 @@

   bool shouldPrintMachineCode() const { return Options.PrintMachineCode; }

-  /// Returns the default value of asm verbosity.
-  ///
-  bool getAsmVerbosityDefault() const {
-    return Options.MCOptions.AsmVerbose;
-  }
-
   bool getUniqueSectionNames() const { return Options.UniqueSectionNames; }

   /// Return true if data objects should be emitted into their own section,
@@ -445,11 +445,6 @@
   ///
   virtual const MCPhysReg*
   getCalleeSavedRegs(const MachineFunction *MF) const = 0;
-
-  virtual const MCPhysReg*
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const {
-    return nullptr;
-  }

   /// Return a mask of call-preserved registers for the given calling convention
   /// on the current function. The mask should include all call-preserved
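Note: removing this base-class virtual is what drives the one-line changes in the four target RegisterInfo headers later in this diff: each target keeps its implementation but can no longer mark it override. The before/after shape, sketched:

// Before: overrode the TargetRegisterInfo virtual deleted above.
const MCPhysReg *
getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;

// After: an ordinary member function; callers must name the concrete
// target register-info class to reach it.
const MCPhysReg *
getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;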
@@ -1099,35 +1099,6 @@
   return -1;
 }

-unsigned TargetInstrInfo::computeOperandLatency(
-    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
-    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
-
-  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
-  if (DefLatency >= 0)
-    return DefLatency;
-
-  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");
-
-  int OperLatency = 0;
-  if (UseMI)
-    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
-  else {
-    unsigned DefClass = DefMI.getDesc().getSchedClass();
-    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
-  }
-  if (OperLatency >= 0)
-    return OperLatency;
-
-  // No operand latency was found.
-  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);
-
-  // Expected latency is the max of the stage latency and itinerary props.
-  InstrLatency = std::max(InstrLatency,
-                          defaultDefLatency(ItinData->SchedModel, DefMI));
-  return InstrLatency;
-}
-
 bool TargetInstrInfo::getRegSequenceInputs(
     const MachineInstr &MI, unsigned DefIdx,
     SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
@@ -805,7 +805,6 @@
     = MaxStoresPerMemmoveOptSize = 4;
   UseUnderscoreSetJmp = false;
   UseUnderscoreLongJmp = false;
-  SelectIsExpensive = false;
   HasMultipleConditionRegisters = false;
   HasExtractBitsInsn = false;
   JumpIsExpensive = JumpIsExpensiveOverride;
@@ -7057,16 +7057,6 @@
   return true;
 }

-bool AArch64TargetLowering::hasPairedLoad(Type *LoadedType,
-                                          unsigned &RequiredAligment) const {
-  if (!LoadedType->isIntegerTy() && !LoadedType->isFloatTy())
-    return false;
-  // Cyclone supports unaligned accesses.
-  RequiredAligment = 0;
-  unsigned NumBits = LoadedType->getPrimitiveSizeInBits();
-  return NumBits == 32 || NumBits == 64;
-}
-
 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
                                           unsigned &RequiredAligment) const {
   if (!LoadedType.isSimple() ||
@@ -308,8 +308,6 @@
   bool isZExtFree(EVT VT1, EVT VT2) const override;
   bool isZExtFree(SDValue Val, EVT VT2) const override;

-  bool hasPairedLoad(Type *LoadedType,
-                     unsigned &RequiredAligment) const override;
   bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAligment) const override;

   unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
@@ -34,7 +34,7 @@
   /// Code Generation virtual methods...
   const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
   const MCPhysReg *
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID) const override;

@@ -442,7 +442,6 @@
   // (Section 7.3)
   setHasFloatingPointExceptions(Subtarget->hasFPExceptions());

-  setSelectIsExpensive(false);
   PredictableSelectIsExpensive = false;

   // We want to find all load dependencies for long chains of stores to enable
@@ -98,7 +98,7 @@
   /// Code Generation virtual methods...
   const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
   const MCPhysReg *
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID) const override;
   const uint32_t *getNoPreservedMask() const override;
@@ -74,7 +74,7 @@

   /// Code Generation virtual methods...
   const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
-  const MCPhysReg *getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  const MCPhysReg *getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID CC) const override;
   const uint32_t *getNoPreservedMask() const override;
@@ -8072,32 +8072,6 @@
   NopInst.setOpcode(X86::NOOP);
 }

-// This code must remain in sync with getJumpInstrTableEntryBound in this class!
-// In particular, getJumpInstrTableEntryBound must always return an upper bound
-// on the encoding lengths of the instructions generated by
-// getUnconditionalBranch and getTrap.
-void X86InstrInfo::getUnconditionalBranch(
-    MCInst &Branch, const MCSymbolRefExpr *BranchTarget) const {
-  Branch.setOpcode(X86::JMP_1);
-  Branch.addOperand(MCOperand::createExpr(BranchTarget));
-}
-
-// This code must remain in sync with getJumpInstrTableEntryBound in this class!
-// In particular, getJumpInstrTableEntryBound must always return an upper bound
-// on the encoding lengths of the instructions generated by
-// getUnconditionalBranch and getTrap.
-void X86InstrInfo::getTrap(MCInst &MI) const {
-  MI.setOpcode(X86::TRAP);
-}
-
-// See getTrap and getUnconditionalBranch for conditions on the value returned
-// by this function.
-unsigned X86InstrInfo::getJumpInstrTableEntryBound() const {
-  // 5 bytes suffice: JMP_4 Symbol@PLT uses 1 byte (E9) for the JMP_4 and 4
-  // bytes for the symbol offset. And TRAP is ud2, which is two bytes (0F 0B).
-  return 5;
-}
-
 bool X86InstrInfo::isHighLatencyDef(int opc) const {
   switch (opc) {
   default: return false;
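Note: a compile-time restatement of the arithmetic in the deleted comment; the constant names are invented here:

// JMP rel32: one opcode byte (E9) plus a 4-byte displacement.
constexpr unsigned JmpRel32Bytes = 1 + 4; // = 5
// UD2: two opcode bytes (0F 0B).
constexpr unsigned Ud2Bytes = 2;
static_assert(JmpRel32Bytes >= Ud2Bytes,
              "the branch, not the trap, determines the entry bound");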
@@ -487,14 +487,6 @@
                               unsigned Size, unsigned Alignment,
                               bool AllowCommute) const;

-  void
-  getUnconditionalBranch(MCInst &Branch,
-                         const MCSymbolRefExpr *BranchTarget) const override;
-
-  void getTrap(MCInst &MI) const override;
-
-  unsigned getJumpInstrTableEntryBound() const override;
-
   bool isHighLatencyDef(int opc) const override;

   bool hasHighOperandLatency(const TargetSchedModel &SchedModel,
@@ -99,7 +99,7 @@
   const MCPhysReg *
   getCalleeSavedRegs(const MachineFunction* MF) const override;
   const MCPhysReg *
-  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const override;
+  getCalleeSavedRegsViaCopy(const MachineFunction *MF) const;
   const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                        CallingConv::ID) const override;
   const uint32_t *getNoPreservedMask() const override;