llvm.org GIT mirror llvm / d318139
MachineFunction: Return reference from getFunction(); NFC. The Function can never be nullptr, so we can return a reference. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@320884 91177308-0d34-0410-b5e6-96231b3b80d8 — Matthias Braun, 2 years ago
242 changed files, with 835 additions and 849 deletions. (Raw diff | Collapse all | Expand all)
13401340 << ", int = " << getBlockFreq(&BB).getFrequency();
13411341 if (Optional ProfileCount =
13421342 BlockFrequencyInfoImplBase::getBlockProfileCount(
1343 *F->getFunction(), getNode(&BB)))
1343 F->getFunction(), getNode(&BB)))
13441344 OS << ", count = " << ProfileCount.getValue();
13451345 if (Optional IrrLoopHeaderWeight =
13461346 BB.getIrrLoopHeaderWeight())
379379 /// Return the DataLayout attached to the Module associated to this MF.
380380 const DataLayout &getDataLayout() const;
381381
382 /// getFunction - Return the LLVM function that this machine code represents
383 const Function *getFunction() const { return &F; }
382 /// Return the LLVM function that this machine code represents
383 const Function &getFunction() const { return F; }
384384
385385 /// getName - Return the name of the corresponding LLVM function.
386386 StringRef getName() const;
3232 const DiagnosticLocation &Loc,
3333 const MachineBasicBlock *MBB)
3434 : DiagnosticInfoOptimizationBase(Kind, DS_Remark, PassName, RemarkName,
35 *MBB->getParent()->getFunction(), Loc),
35 MBB->getParent()->getFunction(), Loc),
3636 MBB(MBB) {}
3737
3838 /// MI-specific kinds of diagnostic Arguments.
158158 /// (1) to filter trivial false positives or (2) to provide more context so
159159 /// that non-trivial false positives can be quickly detected by the user.
160160 bool allowExtraAnalysis(StringRef PassName) const {
161 return (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
162 MF.getFunction()->getContext()
161 return (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
162 MF.getFunction().getContext()
163163 .getDiagHandlerPtr()->isAnyRemarkEnabled(PassName));
164164 }
165165
171171 // remarks enabled. We can't currently check whether remarks are requested
172172 // for the calling pass since that requires actually building the remark.
173173
174 if (MF.getFunction()->getContext().getDiagnosticsOutputFile() ||
175 MF.getFunction()->getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
174 if (MF.getFunction().getContext().getDiagnosticsOutputFile() ||
175 MF.getFunction().getContext().getDiagHandlerPtr()->isAnyRemarkEnabled()) {
176176 auto R = RemarkBuilder();
177177 emit((DiagnosticInfoOptimizationBase &)R);
178178 }
329329
330330 /// Check if given function is safe for not having callee saved registers.
331331 /// This is used when interprocedural register allocation is enabled.
332 static bool isSafeForNoCSROpt(const Function *F) {
333 if (!F->hasLocalLinkage() || F->hasAddressTaken() ||
334 !F->hasFnAttribute(Attribute::NoRecurse))
332 static bool isSafeForNoCSROpt(const Function &F) {
333 if (!F.hasLocalLinkage() || F.hasAddressTaken() ||
334 !F.hasFnAttribute(Attribute::NoRecurse))
335335 return false;
336336 // Function should not be optimized as tail call.
337 for (const User *U : F->users())
337 for (const User *U : F.users())
338338 if (auto CS = ImmutableCallSite(U))
339339 if (CS.isTailCall())
340340 return false;
130130 // This is here to help easily convert from FunctionT * (Function * or
131131 // MachineFunction *) in BlockFrequencyInfoImpl to Function * by calling
132132 // FunctionT->getFunction().
133 const Function *getFunction() const { return this; }
133 const Function &getFunction() const { return *this; }
134134
135135 static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
136136 const Twine &N = "", Module *M = nullptr) {
667667
668668 int EntryBBNumber = MF.front().getNumber();
669669 bool IsSEH = isAsynchronousEHPersonality(
670 classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
670 classifyEHPersonality(MF.getFunction().getPersonalityFn()));
671671
672672 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
673673 SmallVector FuncletBlocks;
5959 ///
6060 void ARMException::endFunction(const MachineFunction *MF) {
6161 ARMTargetStreamer &ATS = getTargetStreamer();
62 const Function *F = MF->getFunction();
62 const Function &F = MF->getFunction();
6363 const Function *Per = nullptr;
64 if (F->hasPersonalityFn())
65 Per = dyn_cast(F->getPersonalityFn()->stripPointerCasts());
64 if (F.hasPersonalityFn())
65 Per = dyn_cast(F.getPersonalityFn()->stripPointerCasts());
6666 bool forceEmitPersonality =
67 F->hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
68 F->needsUnwindTableEntry();
67 F.hasPersonalityFn() && !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
68 F.needsUnwindTableEntry();
6969 bool shouldEmitPersonality = forceEmitPersonality ||
7070 !MF->getLandingPads().empty();
71 if (!Asm->MF->getFunction()->needsUnwindTableEntry() &&
71 if (!Asm->MF->getFunction().needsUnwindTableEntry() &&
7272 !shouldEmitPersonality)
7373 ATS.emitCantUnwind();
7474 else if (shouldEmitPersonality) {
620620 /// EmitFunctionHeader - This method emits the header for the current
621621 /// function.
622622 void AsmPrinter::EmitFunctionHeader() {
623 const Function *F = MF->getFunction();
623 const Function &F = MF->getFunction();
624624
625625 if (isVerbose())
626626 OutStreamer->GetCommentOS()
627627 << "-- Begin function "
628 << GlobalValue::dropLLVMManglingEscape(F->getName()) << '\n';
628 << GlobalValue::dropLLVMManglingEscape(F.getName()) << '\n';
629629
630630 // Print out constants referenced by the function
631631 EmitConstantPool();
632632
633633 // Print the 'header' of function.
634 OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(F, TM));
635 EmitVisibility(CurrentFnSym, F->getVisibility());
636
637 EmitLinkage(F, CurrentFnSym);
634 OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(&F, TM));
635 EmitVisibility(CurrentFnSym, F.getVisibility());
636
637 EmitLinkage(&F, CurrentFnSym);
638638 if (MAI->hasFunctionAlignment())
639 EmitAlignment(MF->getAlignment(), F);
639 EmitAlignment(MF->getAlignment(), &F);
640640
641641 if (MAI->hasDotTypeDotSizeDirective())
642642 OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_ELF_TypeFunction);
643643
644644 if (isVerbose()) {
645 F->printAsOperand(OutStreamer->GetCommentOS(),
646 /*PrintType=*/false, F->getParent());
645 F.printAsOperand(OutStreamer->GetCommentOS(),
646 /*PrintType=*/false, F.getParent());
647647 OutStreamer->GetCommentOS() << '\n';
648648 }
649649
650650 // Emit the prefix data.
651 if (F->hasPrefixData()) {
651 if (F.hasPrefixData()) {
652652 if (MAI->hasSubsectionsViaSymbols()) {
653653 // Preserving prefix data on platforms which use subsections-via-symbols
654654 // is a bit tricky. Here we introduce a symbol for the prefix data
657657 MCSymbol *PrefixSym = OutContext.createLinkerPrivateTempSymbol();
658658 OutStreamer->EmitLabel(PrefixSym);
659659
660 EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
660 EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrefixData());
661661
662662 // Emit an .alt_entry directive for the actual function symbol.
663663 OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_AltEntry);
664664 } else {
665 EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
665 EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrefixData());
666666 }
667667 }
668668
674674 // references to the dangling symbols. Emit them at the start of the function
675675 // so that we don't get references to undefined symbols.
676676 std::vector DeadBlockSyms;
677 MMI->takeDeletedSymbolsForFunction(F, DeadBlockSyms);
677 MMI->takeDeletedSymbolsForFunction(&F, DeadBlockSyms);
678678 for (unsigned i = 0, e = DeadBlockSyms.size(); i != e; ++i) {
679679 OutStreamer->AddComment("Address taken block that was later removed");
680680 OutStreamer->EmitLabel(DeadBlockSyms[i]);
699699 }
700700
701701 // Emit the prologue data.
702 if (F->hasPrologueData())
703 EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrologueData());
702 if (F.hasPrologueData())
703 EmitGlobalConstant(F.getParent()->getDataLayout(), F.getPrologueData());
704704 }
705705
706706 /// EmitFunctionEntryLabel - Emit the label that is the entrypoint for the
899899
900900 AsmPrinter::CFIMoveType AsmPrinter::needsCFIMoves() const {
901901 if (MAI->getExceptionHandlingType() == ExceptionHandling::DwarfCFI &&
902 MF->getFunction()->needsUnwindTableEntry())
902 MF->getFunction().needsUnwindTableEntry())
903903 return CFI_M_EH;
904904
905905 if (MMI->hasDebugInfo())
909909 }
910910
911911 bool AsmPrinter::needsSEHMoves() {
912 return MAI->usesWindowsCFI() && MF->getFunction()->needsUnwindTableEntry();
912 return MAI->usesWindowsCFI() && MF->getFunction().needsUnwindTableEntry();
913913 }
914914
915915 void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) {
963963 OutStreamer->PushSection();
964964 OutStreamer->SwitchSection(StackSizeSection);
965965
966 const MCSymbol *FunctionSymbol = getSymbol(MF.getFunction());
966 const MCSymbol *FunctionSymbol = getSymbol(&MF.getFunction());
967967 uint64_t StackSize = FrameInfo.getStackSize();
968968 OutStreamer->EmitValue(MCSymbolRefExpr::create(FunctionSymbol, OutContext),
969969 /* size = */ 8);
979979
980980 // We might emit an EH table that uses function begin and end labels even if
981981 // we don't have any landingpads.
982 if (!MF.getFunction()->hasPersonalityFn())
982 if (!MF.getFunction().hasPersonalityFn())
983983 return false;
984984 return !isNoOpWithoutInvoke(
985 classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
985 classifyEHPersonality(MF.getFunction().getPersonalityFn()));
986986 }
987987
988988 /// EmitFunctionBody - This method emits the body and trailer for a
10691069
10701070 EmittedInsts += NumInstsInFunction;
10711071 MachineOptimizationRemarkAnalysis R(DEBUG_TYPE, "InstructionCount",
1072 MF->getFunction()->getSubprogram(),
1072 MF->getFunction().getSubprogram(),
10731073 &MF->front());
10741074 R << ore::NV("NumInstructions", NumInstsInFunction)
10751075 << " instructions in function";
10971097 }
10981098 }
10991099
1100 const Function *F = MF->getFunction();
1101 for (const auto &BB : *F) {
1100 const Function &F = MF->getFunction();
1101 for (const auto &BB : F) {
11021102 if (!BB.hasAddressTaken())
11031103 continue;
11041104 MCSymbol *Sym = GetBlockAddressSymbol(&BB);
14411441 void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
14421442 this->MF = &MF;
14431443 // Get the function symbol.
1444 CurrentFnSym = getSymbol(MF.getFunction());
1444 CurrentFnSym = getSymbol(&MF.getFunction());
14451445 CurrentFnSymForSize = CurrentFnSym;
14461446 CurrentFnBegin = nullptr;
14471447 CurExceptionSym = nullptr;
15671567
15681568 // Pick the directive to use to print the jump table entries, and switch to
15691569 // the appropriate section.
1570 const Function *F = MF->getFunction();
1570 const Function &F = MF->getFunction();
15711571 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
15721572 bool JTInDiffSection = !TLOF.shouldPutJumpTableInFunctionSection(
15731573 MJTI->getEntryKind() == MachineJumpTableInfo::EK_LabelDifference32,
1574 *F);
1574 F);
15751575 if (JTInDiffSection) {
15761576 // Drop it in the readonly section.
1577 MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(*F, TM);
1577 MCSection *ReadOnlySection = TLOF.getSectionForJumpTable(F, TM);
15781578 OutStreamer->SwitchSection(ReadOnlySection);
15791579 }
15801580
19481948 raw_string_ostream OS(S);
19491949 OS << "Unsupported expression in static initializer: ";
19501950 CE->printAsOperand(OS, /*PrintType=*/false,
1951 !MF ? nullptr : MF->getFunction()->getParent());
1951 !MF ? nullptr : MF->getFunction().getParent());
19521952 report_fatal_error(OS.str());
19531953 }
19541954 case Instruction::GetElementPtr: {
26312631 assert(MF != nullptr && "Machine function must be valid");
26322632 assert(LI != nullptr && "Loop info must be valid");
26332633 Context.IsPaddingActive = !MF->hasInlineAsm() &&
2634 !MF->getFunction()->optForSize() &&
2634 !MF->getFunction().optForSize() &&
26352635 TM.getOptLevel() != CodeGenOpt::None;
26362636 const MachineLoop *CurrentLoop = LI->getLoopFor(&MBB);
26372637 Context.IsBasicBlockInsideInnermostLoop =
28292829 return;
28302830
28312831 auto PrevSection = OutStreamer->getCurrentSectionOnly();
2832 auto Fn = MF->getFunction();
2832 const Function &F = MF->getFunction();
28332833 MCSection *InstMap = nullptr;
28342834 MCSection *FnSledIndex = nullptr;
28352835 if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
28372837 assert(Associated != nullptr);
28382838 auto Flags = ELF::SHF_WRITE | ELF::SHF_ALLOC | ELF::SHF_LINK_ORDER;
28392839 std::string GroupName;
2840 if (Fn->hasComdat()) {
2840 if (F.hasComdat()) {
28412841 Flags |= ELF::SHF_GROUP;
2842 GroupName = Fn->getComdat()->getName();
2842 GroupName = F.getComdat()->getName();
28432843 }
28442844
28452845 auto UniqueID = ++XRayFnUniqueID;
28852885
28862886 void AsmPrinter::recordSled(MCSymbol *Sled, const MachineInstr &MI,
28872887 SledKind Kind, uint8_t Version) {
2888 auto Fn = MI.getMF()->getFunction();
2889 auto Attr = Fn->getFnAttribute("function-instrument");
2890 bool LogArgs = Fn->hasFnAttribute("xray-log-args");
2888 const Function &F = MI.getMF()->getFunction();
2889 auto Attr = F.getFnAttribute("function-instrument");
2890 bool LogArgs = F.hasFnAttribute("xray-log-args");
28912891 bool AlwaysInstrument =
28922892 Attr.isStringAttribute() && Attr.getValueAsString() == "xray-always";
28932893 if (Kind == SledKind::FUNCTION_ENTER && LogArgs)
28942894 Kind = SledKind::LOG_ARGS_ENTER;
28952895 Sleds.emplace_back(XRayFunctionEntry{Sled, CurrentFnSym, Kind,
2896 AlwaysInstrument, Fn, Version});
2896 AlwaysInstrument, &F, Version});
28972897 }
28982898
28992899 uint16_t AsmPrinter::getDwarfVersion() const {
513513 // Reset SanitizeAddress based on the function's attribute.
514514 MCTargetOptions MCOptions = TM.Options.MCOptions;
515515 MCOptions.SanitizeAddress =
516 MF->getFunction()->hasFnAttribute(Attribute::SanitizeAddress);
516 MF->getFunction().hasFnAttribute(Attribute::SanitizeAddress);
517517
518518 EmitInlineAsm(OS.str(), getSubtargetInfo(), MCOptions, LocMD,
519519 MI->getInlineAsmDialect());
11531153 }
11541154
11551155 void CodeViewDebug::beginFunctionImpl(const MachineFunction *MF) {
1156 const Function *GV = MF->getFunction();
1157 assert(FnDebugInfo.count(GV) == false);
1158 CurFn = &FnDebugInfo[GV];
1156 const Function &GV = MF->getFunction();
1157 assert(FnDebugInfo.count(&GV) == false);
1158 CurFn = &FnDebugInfo[&GV];
11591159 CurFn->FuncId = NextFuncId++;
11601160 CurFn->Begin = Asm->getFunctionBegin();
11611161
22722272 }
22732273
22742274 void CodeViewDebug::endFunctionImpl(const MachineFunction *MF) {
2275 const Function *GV = MF->getFunction();
2276 assert(FnDebugInfo.count(GV));
2277 assert(CurFn == &FnDebugInfo[GV]);
2278
2279 collectVariableInfo(GV->getSubprogram());
2275 const Function &GV = MF->getFunction();
2276 assert(FnDebugInfo.count(&GV));
2277 assert(CurFn == &FnDebugInfo[&GV]);
2278
2279 collectVariableInfo(GV.getSubprogram());
22802280
22812281 // Don't emit anything if we don't have any line tables.
22822282 if (!CurFn->HaveLineInfo) {
2283 FnDebugInfo.erase(GV);
2283 FnDebugInfo.erase(&GV);
22842284 CurFn = nullptr;
22852285 return;
22862286 }
178178 const MachineFunction *MF) {
179179 if (!MMI->hasDebugInfo())
180180 return false;
181 auto *SP = MF->getFunction()->getSubprogram();
181 auto *SP = MF->getFunction().getSubprogram();
182182 if (!SP)
183183 return false;
184184 assert(SP->getUnit());
222222 // label, so arguments are visible when breaking at function entry.
223223 const DILocalVariable *DIVar = Ranges.front().first->getDebugVariable();
224224 if (DIVar->isParameter() &&
225 getDISubprogram(DIVar->getScope())->describes(MF->getFunction())) {
225 getDISubprogram(DIVar->getScope())->describes(&MF->getFunction())) {
226226 LabelsBeforeInsn[Ranges.front().first] = Asm->getFunctionBegin();
227227 if (Ranges.front().first->getDebugExpression()->isFragment()) {
228228 // Mark all non-overlapping initial fragments.
8686
8787 void DwarfCFIException::beginFunction(const MachineFunction *MF) {
8888 shouldEmitMoves = shouldEmitPersonality = shouldEmitLSDA = false;
89 const Function *F = MF->getFunction();
89 const Function &F = MF->getFunction();
9090
9191 // If any landing pads survive, we need an EH table.
9292 bool hasLandingPads = !MF->getLandingPads().empty();
9999 const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
100100 unsigned PerEncoding = TLOF.getPersonalityEncoding();
101101 const Function *Per = nullptr;
102 if (F->hasPersonalityFn())
103 Per = dyn_cast(F->getPersonalityFn()->stripPointerCasts());
102 if (F.hasPersonalityFn())
103 Per = dyn_cast(F.getPersonalityFn()->stripPointerCasts());
104104
105105 // Emit a personality function even when there are no landing pads
106106 forceEmitPersonality =
107107 // ...if a personality function is explicitly specified
108 F->hasPersonalityFn() &&
108 F.hasPersonalityFn() &&
109109 // ... and it's not known to be a noop in the absence of invokes
110110 !isNoOpWithoutInvoke(classifyEHPersonality(Per)) &&
111111 // ... and we're not explicitly asked not to emit it
112 F->needsUnwindTableEntry();
112 F.needsUnwindTableEntry();
113113
114114 shouldEmitPersonality =
115115 (forceEmitPersonality ||
142142 if (!shouldEmitPersonality)
143143 return;
144144
145 auto *F = MBB->getParent()->getFunction();
146 auto *P = dyn_cast(F->getPersonalityFn()->stripPointerCasts());
145 auto &F = MBB->getParent()->getFunction();
146 auto *P = dyn_cast(F.getPersonalityFn()->stripPointerCasts());
147147 assert(P && "Expected personality function");
148148
149149 // If we are forced to emit this personality, make sure to record
11621162 DebugHandlerBase::beginInstruction(MI);
11631163 assert(CurMI);
11641164
1165 const auto *SP = MI->getMF()->getFunction()->getSubprogram();
1165 const auto *SP = MI->getMF()->getFunction().getSubprogram();
11661166 if (!SP || SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
11671167 return;
11681168
12601260 void DwarfDebug::beginFunctionImpl(const MachineFunction *MF) {
12611261 CurFn = MF;
12621262
1263 auto *SP = MF->getFunction()->getSubprogram();
1263 auto *SP = MF->getFunction().getSubprogram();
12641264 assert(LScopes.empty() || SP == LScopes.getCurrentFunctionScope()->getScopeNode());
12651265 if (SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
12661266 return;
12961296
12971297 // Gather and emit post-function debug information.
12981298 void DwarfDebug::endFunctionImpl(const MachineFunction *MF) {
1299 const DISubprogram *SP = MF->getFunction()->getSubprogram();
1299 const DISubprogram *SP = MF->getFunction().getSubprogram();
13001300
13011301 assert(CurFn == MF &&
13021302 "endFunction should be called with the same function as beginFunction");
6262 bool hasLandingPads = !MF->getLandingPads().empty();
6363 bool hasEHFunclets = MF->hasEHFunclets();
6464
65 const Function *F = MF->getFunction();
65 const Function &F = MF->getFunction();
6666
6767 shouldEmitMoves = Asm->needsSEHMoves() && MF->hasWinCFI();
6868
7171
7272 EHPersonality Per = EHPersonality::Unknown;
7373 const Function *PerFn = nullptr;
74 if (F->hasPersonalityFn()) {
75 PerFn = dyn_cast(F->getPersonalityFn()->stripPointerCasts());
74 if (F.hasPersonalityFn()) {
75 PerFn = dyn_cast(F.getPersonalityFn()->stripPointerCasts());
7676 Per = classifyEHPersonality(PerFn);
7777 }
7878
79 bool forceEmitPersonality = F->hasPersonalityFn() &&
79 bool forceEmitPersonality = F.hasPersonalityFn() &&
8080 !isNoOpWithoutInvoke(Per) &&
81 F->needsUnwindTableEntry();
81 F.needsUnwindTableEntry();
8282
8383 shouldEmitPersonality =
8484 forceEmitPersonality || ((hasLandingPads || hasEHFunclets) &&
9797 // functions may still refer to it.
9898 const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();
9999 StringRef FLinkageName =
100 GlobalValue::dropLLVMManglingEscape(MF->getFunction()->getName());
100 GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
101101 emitEHRegistrationOffsetLabel(FuncInfo, FLinkageName);
102102 }
103103 shouldEmitLSDA = hasEHFunclets;
114114 if (!shouldEmitPersonality && !shouldEmitMoves && !shouldEmitLSDA)
115115 return;
116116
117 const Function *F = MF->getFunction();
117 const Function &F = MF->getFunction();
118118 EHPersonality Per = EHPersonality::Unknown;
119 if (F->hasPersonalityFn())
120 Per = classifyEHPersonality(F->getPersonalityFn()->stripPointerCasts());
119 if (F.hasPersonalityFn())
120 Per = classifyEHPersonality(F.getPersonalityFn()->stripPointerCasts());
121121
122122 // Get rid of any dead landing pads if we're not using funclets. In funclet
123123 // schemes, the landing pad is not actually reachable. It only exists so
169169 // Give catches and cleanups a name based off of their parent function and
170170 // their funclet entry block's number.
171171 const MachineFunction *MF = MBB->getParent();
172 const Function *F = MF->getFunction();
173 StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
172 const Function &F = MF->getFunction();
173 StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
174174 MCContext &Ctx = MF->getContext();
175175 StringRef HandlerPrefix = MBB->isCleanupFuncletEntry() ? "dtor" : "catch";
176176 return Ctx.getOrCreateSymbol("?" + HandlerPrefix + "$" +
182182 MCSymbol *Sym) {
183183 CurrentFuncletEntry = &MBB;
184184
185 const Function *F = Asm->MF->getFunction();
185 const Function &F = Asm->MF->getFunction();
186186 // If a symbol was not provided for the funclet, invent one.
187187 if (!Sym) {
188188 Sym = getMCSymbolForMBB(Asm, &MBB);
197197 // We want our funclet's entry point to be aligned such that no nops will be
198198 // present after the label.
199199 Asm->EmitAlignment(std::max(Asm->MF->getAlignment(), MBB.getAlignment()),
200 F);
200 &F);
201201
202202 // Now that we've emitted the alignment directive, point at our funclet.
203203 Asm->OutStreamer->EmitLabel(Sym);
214214 const Function *PerFn = nullptr;
215215
216216 // Determine which personality routine we are using for this funclet.
217 if (F->hasPersonalityFn())
218 PerFn = dyn_cast(F->getPersonalityFn()->stripPointerCasts());
217 if (F.hasPersonalityFn())
218 PerFn = dyn_cast(F.getPersonalityFn()->stripPointerCasts());
219219 const MCSymbol *PersHandlerSym =
220220 TLOF.getCFIPersonalitySymbol(PerFn, Asm->TM, MMI);
221221
236236
237237 const MachineFunction *MF = Asm->MF;
238238 if (shouldEmitMoves || shouldEmitPersonality) {
239 const Function *F = MF->getFunction();
239 const Function &F = MF->getFunction();
240240 EHPersonality Per = EHPersonality::Unknown;
241 if (F->hasPersonalityFn())
242 Per = classifyEHPersonality(F->getPersonalityFn()->stripPointerCasts());
241 if (F.hasPersonalityFn())
242 Per = classifyEHPersonality(F.getPersonalityFn()->stripPointerCasts());
243243
244244 // Emit an UNWIND_INFO struct describing the prologue.
245245 Asm->OutStreamer->EmitWinEHHandlerData();
248248 !CurrentFuncletEntry->isCleanupFuncletEntry()) {
249249 // If this is a C++ catch funclet (or the parent function),
250250 // emit a reference to the LSDA for the parent function.
251 StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
251 StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
252252 MCSymbol *FuncInfoXData = Asm->OutContext.getOrCreateSymbol(
253253 Twine("$cppxdata$", FuncLinkageName));
254254 Asm->OutStreamer->EmitValue(create32bitRef(FuncInfoXData), 4);
532532 // Emit a label assignment with the SEH frame offset so we can use it for
533533 // llvm.x86.seh.recoverfp.
534534 StringRef FLinkageName =
535 GlobalValue::dropLLVMManglingEscape(MF->getFunction()->getName());
535 GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
536536 MCSymbol *ParentFrameOffset =
537537 Ctx.getOrCreateParentFrameOffsetSymbol(FLinkageName);
538538 const MCExpr *MCOffset =
627627 }
628628
629629 void WinException::emitCXXFrameHandler3Table(const MachineFunction *MF) {
630 const Function *F = MF->getFunction();
630 const Function &F = MF->getFunction();
631631 auto &OS = *Asm->OutStreamer;
632632 const WinEHFuncInfo &FuncInfo = *MF->getWinEHFuncInfo();
633633
634 StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
634 StringRef FuncLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
635635
636636 SmallVector, 4> IPToStateTable;
637637 MCSymbol *FuncInfoXData = nullptr;
937937 /// indexed by state number instead of IP.
938938 void WinException::emitExceptHandlerTable(const MachineFunction *MF) {
939939 MCStreamer &OS = *Asm->OutStreamer;
940 const Function *F = MF->getFunction();
941 StringRef FLinkageName = GlobalValue::dropLLVMManglingEscape(F->getName());
940 const Function &F = MF->getFunction();
941 StringRef FLinkageName = GlobalValue::dropLLVMManglingEscape(F.getName());
942942
943943 bool VerboseAsm = OS.isVerboseAsm();
944944 auto AddComment = [&](const Twine &Comment) {
955955 OS.EmitLabel(LSDALabel);
956956
957957 const Function *Per =
958 dyn_cast(F->getPersonalityFn()->stripPointerCasts());
958 dyn_cast(F.getPersonalityFn()->stripPointerCasts());
959959 StringRef PerName = Per->getName();
960960 int BaseState = -1;
961961 if (PerName == "_except_handler4") {
117117 "Control Flow Optimizer", false, false)
118118
119119 bool BranchFolderPass::runOnMachineFunction(MachineFunction &MF) {
120 if (skipFunction(*MF.getFunction()))
120 if (skipFunction(MF.getFunction()))
121121 return false;
122122
123123 TargetPassConfig *PassConfig = &getAnalysis();
684684 // branch instruction, which is likely to be smaller than the 2
685685 // instructions that would be deleted in the merge.
686686 MachineFunction *MF = MBB1->getParent();
687 return EffectiveTailLen >= 2 && MF->getFunction()->optForSize() &&
687 return EffectiveTailLen >= 2 && MF->getFunction().optForSize() &&
688688 (I1 == MBB1->begin() || I2 == MBB2->begin());
689689 }
690690
15101510 }
15111511
15121512 if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
1513 MF.getFunction()->optForSize()) {
1513 MF.getFunction().optForSize()) {
15141514 // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
15151515 // direction, thereby defeating careful block placement and regressing
15161516 // performance. Therefore, only consider this for optsize functions.
9393 }
9494
9595 bool DeadMachineInstructionElim::runOnMachineFunction(MachineFunction &MF) {
96 if (skipFunction(*MF.getFunction()))
96 if (skipFunction(MF.getFunction()))
9797 return false;
9898
9999 bool AnyChanges = false;
784784 bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
785785 DEBUG(dbgs() << "********** EARLY IF-CONVERSION **********\n"
786786 << "********** Function: " << MF.getName() << '\n');
787 if (skipFunction(*MF.getFunction()))
787 if (skipFunction(MF.getFunction()))
788788 return false;
789789
790790 // Only run if conversion if the target wants it.
616616 }
617617
618618 bool ExecutionDepsFix::runOnMachineFunction(MachineFunction &mf) {
619 if (skipFunction(*mf.getFunction()))
619 if (skipFunction(mf.getFunction()))
620620 return false;
621621 MF = &mf;
622622 TII = MF->getSubtarget().getInstrInfo();
3535
3636 bool FEntryInserter::runOnMachineFunction(MachineFunction &MF) {
3737 const std::string FEntryName =
38 MF.getFunction()->getFnAttribute("fentry-call").getValueAsString();
38 MF.getFunction().getFnAttribute("fentry-call").getValueAsString();
3939 if (FEntryName != "true")
4040 return false;
4141
327327
328328 bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
329329 // Quick exit for functions that do not use GC.
330 if (!MF.getFunction()->hasGC())
330 if (!MF.getFunction().hasGC())
331331 return false;
332332
333 FI = &getAnalysis().getFunctionInfo(*MF.getFunction());
333 FI = &getAnalysis().getFunctionInfo(MF.getFunction());
334334 MMI = &getAnalysis();
335335 TII = MF.getSubtarget().getInstrInfo();
336336
107107 ArrayRef Args,
108108 ValueHandler &Handler) const {
109109 MachineFunction &MF = MIRBuilder.getMF();
110 const Function &F = *MF.getFunction();
110 const Function &F = MF.getFunction();
111111 const DataLayout &DL = F.getParent()->getDataLayout();
112112
113113 SmallVector ArgLocs;
123123 bool Success = translate(*CV, VReg);
124124 if (!Success) {
125125 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
126 MF->getFunction()->getSubprogram(),
127 &MF->getFunction()->getEntryBlock());
126 MF->getFunction().getSubprogram(),
127 &MF->getFunction().getEntryBlock());
128128 R << "unable to translate constant: " << ore::NV("Type", Val.getType());
129129 reportTranslationError(*MF, *TPC, *ORE, R);
130130 return VReg;
590590 MIB.addDef(DstReg);
591591
592592 auto &TLI = *MF->getSubtarget().getTargetLowering();
593 Value *Global = TLI.getSDagStackGuard(*MF->getFunction()->getParent());
593 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
594594 if (!Global)
595595 return;
596596
924924 // If there aren't registers to copy the values into (e.g., during SjLj
925925 // exceptions), then don't bother.
926926 auto &TLI = *MF->getSubtarget().getTargetLowering();
927 const Constant *PersonalityFn = MF->getFunction()->getPersonalityFn();
927 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
928928 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
929929 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
930930 return true;
12351235
12361236 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
12371237 MF = &CurMF;
1238 const Function &F = *MF->getFunction();
1238 const Function &F = MF->getFunction();
12391239 if (F.empty())
12401240 return false;
12411241 CLI = MF->getSubtarget().getCallLowering();
12511251 if (!DL->isLittleEndian()) {
12521252 // Currently we don't properly handle big endian code.
12531253 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1254 MF->getFunction()->getSubprogram(),
1255 &MF->getFunction()->getEntryBlock());
1254 F.getSubprogram(), &F.getEntryBlock());
12561255 R << "unable to translate in big endian mode";
12571256 reportTranslationError(*MF, *TPC, *ORE, R);
12581257 }
12881287 }
12891288 if (!CLI->lowerFormalArguments(EntryBuilder, F, VRegArgs)) {
12901289 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
1291 MF->getFunction()->getSubprogram(),
1292 &MF->getFunction()->getEntryBlock());
1290 F.getSubprogram(), &F.getEntryBlock());
12931291 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
12941292 reportTranslationError(*MF, *TPC, *ORE, R);
12951293 return false;
188188
189189 if (MF.size() != NumBlocks) {
190190 MachineOptimizationRemarkMissed R("gisel-select", "GISelFailure",
191 MF.getFunction()->getSubprogram(),
191 MF.getFunction().getSubprogram(),
192192 /*MBB=*/nullptr);
193193 R << "inserting blocks is not supported yet";
194194 reportGISelFailure(MF, TPC, MORE, R);
174174 // outerloop for that.
175175 if (MF.size() != NumBlocks) {
176176 MachineOptimizationRemarkMissed R("gisel-legalize", "GISelFailure",
177 MF.getFunction()->getSubprogram(),
177 MF.getFunction().getSubprogram(),
178178 /*MBB=*/nullptr);
179179 R << "inserting blocks is not supported yet";
180180 reportGISelFailure(MF, TPC, MORE, R);
135135 LegalizerHelper::libcall(MachineInstr &MI) {
136136 LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
137137 unsigned Size = LLTy.getSizeInBits();
138 auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
138 auto &Ctx = MIRBuilder.getMF().getFunction().getContext();
139139
140140 MIRBuilder.setInstr(MI);
141141
409409 return UnableToLegalize;
410410 int NumParts = SizeOp0 / NarrowSize;
411411 const APInt &Cst = MI.getOperand(1).getCImm()->getValue();
412 LLVMContext &Ctx = MIRBuilder.getMF().getFunction()->getContext();
412 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
413413
414414 SmallVector DstRegs;
415415 for (int i = 0; i < NumParts; ++i) {
823823 return UnableToLegalize;
824824 unsigned Res = MI.getOperand(0).getReg();
825825 Type *ZeroTy;
826 LLVMContext &Ctx = MIRBuilder.getMF().getFunction()->getContext();
826 LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();
827827 switch (Ty.getSizeInBits()) {
828828 case 16:
829829 ZeroTy = Type::getHalfTy(Ctx);
262262
263263 const ConstantInt *NewVal = &Val;
264264 if (Ty.getSizeInBits() != Val.getBitWidth())
265 NewVal = ConstantInt::get(MF->getFunction()->getContext(),
265 NewVal = ConstantInt::get(MF->getFunction().getContext(),
266266 Val.getValue().sextOrTrunc(Ty.getSizeInBits()));
267267
268268 return buildInstr(TargetOpcode::G_CONSTANT).addDef(Res).addCImm(NewVal);
270270
271271 MachineInstrBuilder MachineIRBuilder::buildConstant(unsigned Res,
272272 int64_t Val) {
273 auto IntN = IntegerType::get(MF->getFunction()->getContext(),
273 auto IntN = IntegerType::get(MF->getFunction().getContext(),
274274 MRI->getType(Res).getSizeInBits());
275275 ConstantInt *CI = ConstantInt::get(IntN, Val, true);
276276 return buildConstant(Res, *CI);
600600 return false;
601601
602602 DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n');
603 const Function *F = MF.getFunction();
603 const Function &F = MF.getFunction();
604604 Mode SaveOptMode = OptMode;
605 if (F->hasFnAttribute(Attribute::OptimizeNone))
605 if (F.hasFnAttribute(Attribute::OptimizeNone))
606606 OptMode = Mode::Fast;
607607 init(MF);
608608
336336 INITIALIZE_PASS_END(IfConverter, DEBUG_TYPE, "If Converter", false, false)
337337
338338 bool IfConverter::runOnMachineFunction(MachineFunction &MF) {
339 if (skipFunction(*MF.getFunction()) || (PredicateFtor && !PredicateFtor(MF)))
339 if (skipFunction(MF.getFunction()) || (PredicateFtor && !PredicateFtor(MF)))
340340 return false;
341341
342342 const TargetSubtargetInfo &ST = MF.getSubtarget();
4848 void LexicalScopes::initialize(const MachineFunction &Fn) {
4949 reset();
5050 // Don't attempt any lexical scope creation for a NoDebug compile unit.
51 if (Fn.getFunction()->getSubprogram()->getUnit()->getEmissionKind() ==
51 if (Fn.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
5252 DICompileUnit::NoDebug)
5353 return;
5454 MF = &Fn;
172172 false)).first;
173173
174174 if (!Parent) {
175 assert(cast(Scope)->describes(MF->getFunction()));
175 assert(cast(Scope)->describes(&MF->getFunction()));
176176 assert(!CurrentFnLexicalScope);
177177 CurrentFnLexicalScope = &I->second;
178178 }
702702 }
703703
704704 bool LiveDebugValues::runOnMachineFunction(MachineFunction &MF) {
705 if (!MF.getFunction()->getSubprogram())
705 if (!MF.getFunction().getSubprogram())
706706 // LiveDebugValues will already have removed all DBG_VALUEs.
707707 return false;
708708
709709 // Skip functions from NoDebug compilation units.
710 if (MF.getFunction()->getSubprogram()->getUnit()->getEmissionKind() ==
710 if (MF.getFunction().getSubprogram()->getUnit()->getEmissionKind() ==
711711 DICompileUnit::NoDebug)
712712 return false;
713713
832832 bool LiveDebugVariables::runOnMachineFunction(MachineFunction &mf) {
833833 if (!EnableLDV)
834834 return false;
835 if (!mf.getFunction()->getSubprogram()) {
835 if (!mf.getFunction().getSubprogram()) {
836836 removeDebugValues(mf);
837837 return false;
838838 }
105105 }
106106
107107 bool LiveRangeShrink::runOnMachineFunction(MachineFunction &MF) {
108 if (skipFunction(*MF.getFunction()))
108 if (skipFunction(MF.getFunction()))
109109 return false;
110110
111111 MachineRegisterInfo &MRI = MF.getRegInfo();
430430 break;
431431 case MIToken::IRBlock:
432432 // TODO: Report an error when both name and ir block are specified.
433 if (parseIRBlock(BB, *MF.getFunction()))
433 if (parseIRBlock(BB, MF.getFunction()))
434434 return true;
435435 lex();
436436 break;
446446
447447 if (!Name.empty()) {
448448 BB = dyn_cast_or_null(
449 MF.getFunction()->getValueSymbolTable()->lookup(Name));
449 MF.getFunction().getValueSymbolTable()->lookup(Name));
450450 if (!BB)
451451 return error(Loc, Twine("basic block '") + Name +
452452 "' is not defined in the function '" +
12331233 const Constant *&C) {
12341234 auto Source = StringValue.str(); // The source has to be null terminated.
12351235 SMDiagnostic Err;
1236 C = parseConstantValue(Source, Err, *MF.getFunction()->getParent(),
1236 C = parseConstantValue(Source, Err, *MF.getFunction().getParent(),
12371237 &PFS.IRSlots);
12381238 if (!C)
12391239 return error(Loc + Err.getColumnNo(), Err.getMessage());
12531253 lex();
12541254 return false;
12551255 } else if (Token.is(MIToken::PointerType)) {
1256 const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
1256 const DataLayout &DL = MF.getDataLayout();
12571257 unsigned AS = APSInt(Token.range().drop_front()).getZExtValue();
12581258 Ty = LLT::pointer(AS, DL.getPointerSizeInBits(AS));
12591259 lex();
14181418 bool MIParser::parseGlobalValue(GlobalValue *&GV) {
14191419 switch (Token.kind()) {
14201420 case MIToken::NamedGlobalValue: {
1421 const Module *M = MF.getFunction()->getParent();
1421 const Module *M = MF.getFunction().getParent();
14221422 GV = M->getNamedValue(Token.stringValue());
14231423 if (!GV)
14241424 return error(Twine("use of undefined global value '") + Token.range() +
15561556 if (expectAndConsume(MIToken::rparen))
15571557 return true;
15581558
1559 Expr = DIExpression::get(MF.getFunction()->getContext(), Elements);
1559 Expr = DIExpression::get(MF.getFunction().getContext(), Elements);
15601560 return false;
15611561 }
15621562
21012101 bool MIParser::parseIRValue(const Value *&V) {
21022102 switch (Token.kind()) {
21032103 case MIToken::NamedIRValue: {
2104 V = MF.getFunction()->getValueSymbolTable()->lookup(Token.stringValue());
2104 V = MF.getFunction().getValueSymbolTable()->lookup(Token.stringValue());
21052105 break;
21062106 }
21072107 case MIToken::IRValue: {
23602360
23612361 // Optional synchronization scope.
23622362 SyncScope::ID SSID;
2363 if (parseOptionalScope(MF.getFunction()->getContext(), SSID))
2363 if (parseOptionalScope(MF.getFunction().getContext(), SSID))
23642364 return true;
23652365
23662366 // Up to two atomic orderings (cmpxchg provides guarantees on failure).
25412541
25422542 const BasicBlock *MIParser::getIRBlock(unsigned Slot) {
25432543 if (Slots2BasicBlocks.empty())
2544 initSlots2BasicBlocks(*MF.getFunction(), Slots2BasicBlocks);
2544 initSlots2BasicBlocks(MF.getFunction(), Slots2BasicBlocks);
25452545 return getIRBlockFromSlot(Slot, Slots2BasicBlocks);
25462546 }
25472547
25482548 const BasicBlock *MIParser::getIRBlock(unsigned Slot, const Function &F) {
2549 if (&F == MF.getFunction())
2549 if (&F == &MF.getFunction())
25502550 return getIRBlock(Slot);
25512551 DenseMap CustomSlots2BasicBlocks;
25522552 initSlots2BasicBlocks(F, CustomSlots2BasicBlocks);
25772577
25782578 const Value *MIParser::getIRValue(unsigned Slot) {
25792579 if (Slots2Values.empty())
2580 initSlots2Values(*MF.getFunction(), Slots2Values);
2580 initSlots2Values(MF.getFunction(), Slots2Values);
25812581 auto ValueInfo = Slots2Values.find(Slot);
25822582 if (ValueInfo == Slots2Values.end())
25832583 return nullptr;
550550 const yaml::MachineFunction &YamlMF) {
551551 MachineFunction &MF = PFS.MF;
552552 MachineFrameInfo &MFI = MF.getFrameInfo();
553 const Function &F = *MF.getFunction();
553 const Function &F = MF.getFunction();
554554 const yaml::MachineFrameInfo &YamlMFI = YamlMF.FrameInfo;
555555 MFI.setFrameAddressIsTaken(YamlMFI.IsFrameAddressTaken);
556556 MFI.setReturnAddressIsTaken(YamlMFI.IsReturnAddressTaken);
721721 MachineConstantPool &ConstantPool, const yaml::MachineFunction &YamlMF) {
722722 DenseMap &ConstantPoolSlots = PFS.ConstantPoolSlots;
723723 const MachineFunction &MF = PFS.MF;
724 const auto &M = *MF.getFunction()->getParent();
724 const auto &M = *MF.getFunction().getParent();
725725 SMDiagnostic Error;
726726 for (const auto &YamlConstant : YamlMF.Constants) {
727727 if (YamlConstant.IsTargetSpecific)
212212 MachineFunctionProperties::Property::Selected);
213213
214214 convert(YamlMF, MF.getRegInfo(), MF.getSubtarget().getRegisterInfo());
215 ModuleSlotTracker MST(MF.getFunction()->getParent());
216 MST.incorporateFunction(*MF.getFunction());
215 ModuleSlotTracker MST(MF.getFunction().getParent());
216 MST.incorporateFunction(MF.getFunction());
217217 convert(MST, YamlMF.FrameInfo, MF.getFrameInfo());
218218 convertStackObjects(YamlMF, MF, MST);
219219 if (const auto *ConstantPool = MF.getConstantPool())
695695
696696 if (!MI.memoperands_empty()) {
697697 OS << " :: ";
698 const LLVMContext &Context = MF->getFunction()->getContext();
698 const LLVMContext &Context = MF->getFunction().getContext();
699699 bool NeedComma = false;
700700 for (const auto *Op : MI.memoperands()) {
701701 if (NeedComma)
266266 << " is null\n";
267267 return;
268268 }
269 const Function *F = MF->getFunction();
270 const Module *M = F ? F->getParent() : nullptr;
269 const Function &F = MF->getFunction();
270 const Module *M = F.getParent();
271271 ModuleSlotTracker MST(M);
272272 print(OS, MST, Indexes);
273273 }
223223
224224 Optional MachineBlockFrequencyInfo::getBlockProfileCount(
225225 const MachineBasicBlock *MBB) const {
226 const Function *F = MBFI->getFunction()->getFunction();
227 return MBFI ? MBFI->getBlockProfileCount(*F, MBB) : None;
226 const Function &F = MBFI->getFunction()->getFunction();
227 return MBFI ? MBFI->getBlockProfileCount(F, MBB) : None;
228228 }
229229
230230 Optional
231231 MachineBlockFrequencyInfo::getProfileCountFromFreq(uint64_t Freq) const {
232 const Function *F = MBFI->getFunction()->getFunction();
233 return MBFI ? MBFI->getProfileCountFromFreq(*F, Freq) : None;
232 const Function &F = MBFI->getFunction()->getFunction();
233 return MBFI ? MBFI->getProfileCountFromFreq(F, Freq) : None;
234234 }
235235
236236 bool
12341234 // When profile is available, we need to handle the triangle-shape CFG.
12351235 static BranchProbability getLayoutSuccessorProbThreshold(
12361236 const MachineBasicBlock *BB) {
1237 if (!BB->getParent()->getFunction()->getEntryCount())
1237 if (!BB->getParent()->getFunction().getEntryCount())
12381238 return BranchProbability(StaticLikelyProb, 100);
12391239 if (BB->succ_size() == 2) {
12401240 const MachineBasicBlock *Succ1 = *BB->succ_begin();
17681768 // i.e. when the layout predecessor does not fallthrough to the loop header.
17691769 // In practice this never happens though: there always seems to be a preheader
17701770 // that can fallthrough and that is also placed before the header.
1771 if (F->getFunction()->optForSize())
1771 if (F->getFunction().optForSize())
17721772 return L.getHeader();
17731773
17741774 // Check that the header hasn't been fused with a preheader block due to
21772177 // will be merged into the first outer loop chain for which this block is not
21782178 // cold anymore. This needs precise profile data and we only do this when
21792179 // profile data is available.
2180 if (F->getFunction()->getEntryCount() || ForceLoopColdBlock) {
2180 if (F->getFunction().getEntryCount() || ForceLoopColdBlock) {
21812181 BlockFrequency LoopFreq(0);
21822182 for (auto LoopPred : L.getHeader()->predecessors())
21832183 if (!L.contains(LoopPred))
22192219 // for better layout.
22202220 bool RotateLoopWithProfile =
22212221 ForcePreciseRotationCost ||
2222 (PreciseRotationCost && F->getFunction()->getEntryCount());
2222 (PreciseRotationCost && F->getFunction().getEntryCount());
22232223
22242224 // First check to see if there is an obviously preferable top block for the
22252225 // loop. This will default to the header, but may end up as one of the
24842484 // exclusively on the loop info here so that we can align backedges in
24852485 // unnatural CFGs and backedges that were introduced purely because of the
24862486 // loop rotations done during this layout pass.
2487 if (F->getFunction()->optForSize())
2487 if (F->getFunction().optForSize())
24882488 return;
24892489 BlockChain &FunctionChain = *BlockToChain[&F->front()];
24902490 if (FunctionChain.begin() == FunctionChain.end())
27142714 }
27152715
27162716 bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
2717 if (skipFunction(*MF.getFunction()))
2717 if (skipFunction(MF.getFunction()))
27182718 return false;
27192719
27202720 // Check for single-block functions and skip them.
27592759
27602760 if (TailDupPlacement) {
27612761 MPDT = &getAnalysis();
2762 if (MF.getFunction()->optForSize())
2762 if (MF.getFunction().optForSize())
27632763 TailDupSize = 1;
27642764 bool PreRegAlloc = false;
27652765 TailDup.initMF(MF, PreRegAlloc, MBPI, /* LayoutMode */ true, TailDupSize);
28162816 }
28172817 if (ViewBlockLayoutWithBFI != GVDT_None &&
28182818 (ViewBlockFreqFuncName.empty() ||
2819 F->getFunction()->getName().equals(ViewBlockFreqFuncName))) {
2819 F->getFunction().getName().equals(ViewBlockFreqFuncName))) {
28202820 MBFI->view("MBP." + MF.getName(), false);
28212821 }
28222822
726726 }
727727
728728 bool MachineCSE::runOnMachineFunction(MachineFunction &MF) {
729 if (skipFunction(*MF.getFunction()))
729 if (skipFunction(MF.getFunction()))
730730 return false;
731731
732732 TII = MF.getSubtarget().getInstrInfo();
547547 MLI = &getAnalysis();
548548 Traces = &getAnalysis();
549549 MinInstr = nullptr;
550 OptSize = MF.getFunction()->optForSize();
550 OptSize = MF.getFunction().optForSize();
551551
552552 DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
553553 if (!TII->useMachineCombiner()) {
377377 }
378378
379379 bool MachineCopyPropagation::runOnMachineFunction(MachineFunction &MF) {
380 if (skipFunction(*MF.getFunction()))
380 if (skipFunction(MF.getFunction()))
381381 return false;
382382
383383 Changed = false;
243243
244244 /// Should we be emitting segmented stack stuff for the function
245245 bool MachineFunction::shouldSplitStack() const {
246 return getFunction()->hasFnAttribute("split-stack");
246 return getFunction().hasFnAttribute("split-stack");
247247 }
248248
249249 /// This discards all of the MachineBasicBlock numbers and recomputes them.
484484 #endif
485485
486486 StringRef MachineFunction::getName() const {
487 assert(getFunction() && "No function!");
488 return getFunction()->getName();
487 return getFunction().getName();
489488 }
490489
491490 void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
518517 OS << '\n';
519518 }
520519
521 ModuleSlotTracker MST(getFunction()->getParent());
522 MST.incorporateFunction(*getFunction());
520 ModuleSlotTracker MST(getFunction().getParent());
521 MST.incorporateFunction(getFunction());
523522 for (const auto &BB : *this) {
524523 OS << '\n';
525524 BB.print(OS, MST, Indexes);
12101210 const Module *M = nullptr;
12111211 if (const MachineBasicBlock *MBB = getParent())
12121212 if (const MachineFunction *MF = MBB->getParent())
1213 M = MF->getFunction()->getParent();
1213 M = MF->getFunction().getParent();
12141214
12151215 ModuleSlotTracker MST(M);
12161216 print(OS, MST, SkipOpers, SkipDebugLoc, TII);
279279 }
280280
281281 bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
282 if (skipFunction(*MF.getFunction()))
282 if (skipFunction(MF.getFunction()))
283283 return false;
284284
285285 Changed = FirstInLoop = false;
4949 auto &OptDiag = cast(OptDiagCommon);
5050 computeHotness(OptDiag);
5151
52 LLVMContext &Ctx = MF.getFunction()->getContext();
52 LLVMContext &Ctx = MF.getFunction().getContext();
5353
5454 // Only emit it if its hotness meets the threshold.
5555 if (OptDiag.getHotness().getValueOr(0) <
7070 MachineFunction &MF) {
7171 MachineBlockFrequencyInfo *MBFI;
7272
73 if (MF.getFunction()->getContext().getDiagnosticsHotnessRequested())
73 if (MF.getFunction().getContext().getDiagnosticsHotnessRequested())
7474 MBFI = &getAnalysis().getBFI();
7575 else
7676 MBFI = nullptr;
728728
729729 /// The "main" function for implementing Swing Modulo Scheduling.
730730 bool MachinePipeliner::runOnMachineFunction(MachineFunction &mf) {
731 if (skipFunction(*mf.getFunction()))
731 if (skipFunction(mf.getFunction()))
732732 return false;
733733
734734 if (!EnableSWP)
735735 return false;
736736
737 if (mf.getFunction()->getAttributes().hasAttribute(
737 if (mf.getFunction().getAttributes().hasAttribute(
738738 AttributeList::FunctionIndex, Attribute::OptimizeForSize) &&
739739 !EnableSWPOptSize.getPosition())
740740 return false;
530530 const MachineFunction &MF = *MBB.getParent();
531531 // We need to keep correct unwind information even if the function will
532532 // not return, since the runtime may need it.
533 if (MF.getFunction()->hasFnAttribute(Attribute::UWTable))
533 if (MF.getFunction().hasFnAttribute(Attribute::UWTable))
534534 return false;
535535 const Function *Called = getCalledFunction(MI);
536536 return !(Called == nullptr || !Called->hasFnAttribute(Attribute::NoReturn) ||
350350 /// design would be to split blocks at scheduling boundaries, but LLVM has a
351351 /// general bias against block splitting purely for implementation simplicity.
352352 bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
353 if (skipFunction(*mf.getFunction()))
353 if (skipFunction(mf.getFunction()))
354354 return false;
355355
356356 if (EnableMachineSched.getNumOccurrences()) {
388388 }
389389
390390 bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
391 if (skipFunction(*mf.getFunction()))
391 if (skipFunction(mf.getFunction()))
392392 return false;
393393
394394 if (EnablePostRAMachineSched.getNumOccurrences()) {
291291 }
292292
293293 bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
294 if (skipFunction(*MF.getFunction()))
294 if (skipFunction(MF.getFunction()))
295295 return false;
296296
297297 DEBUG(dbgs() << "******** Machine Sinking ********\n");
636636
637637 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
638638 const BasicBlock *BB = MBB->getBasicBlock();
639 const Function *Fn = MF->getFunction();
639 const Function &F = MF->getFunction();
640640 if (LandingPadSuccs.size() > 1 &&
641641 !(AsmInfo &&
642642 AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
643643 BB && isa(BB->getTerminator())) &&
644 !isFuncletEHPersonality(classifyEHPersonality(Fn->getPersonalityFn())))
644 !isFuncletEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
645645 report("MBB has more than one landing pad successor", MBB);
646646
647647 // Call AnalyzeBranch. If it succeeds, there several more conditions to check.
7171 "Optimize machine instruction PHIs", false, false)
7272
7373 bool OptimizePHIs::runOnMachineFunction(MachineFunction &Fn) {
74 if (skipFunction(*Fn.getFunction()))
74 if (skipFunction(Fn.getFunction()))
7575 return false;
7676
7777 MRI = &Fn.getRegInfo();
5353 }
5454
5555 bool PatchableFunction::runOnMachineFunction(MachineFunction &MF) {
56 if (!MF.getFunction()->hasFnAttribute("patchable-function"))
56 if (!MF.getFunction().hasFnAttribute("patchable-function"))
5757 return false;
5858
5959 #ifndef NDEBUG
60 Attribute PatchAttr = MF.getFunction()->getFnAttribute("patchable-function");
60 Attribute PatchAttr = MF.getFunction().getFnAttribute("patchable-function");
6161 StringRef PatchType = PatchAttr.getValueAsString();
6262 assert(PatchType == "prologue-short-redirect" && "Only possibility today!");
6363 #endif
16611661 }
16621662
16631663 bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
1664 if (skipFunction(*MF.getFunction()))
1664 if (skipFunction(MF.getFunction()))
16651665 return false;
16661666
16671667 DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
278278 }
279279
280280 bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
281 if (skipFunction(*Fn.getFunction()))
281 if (skipFunction(Fn.getFunction()))
282282 return false;
283283
284284 TII = Fn.getSubtarget().getInstrInfo();
170170 /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
171171 /// frame indexes with appropriate references.
172172 bool PEI::runOnMachineFunction(MachineFunction &Fn) {
173 const Function* F = Fn.getFunction();
173 const Function &F = Fn.getFunction();
174174 const TargetRegisterInfo *TRI = Fn.getSubtarget().getRegisterInfo();
175175 const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
176176
205205 // called functions. Because of this, calculateCalleeSavedRegisters()
206206 // must be called before this function in order to set the AdjustsStack
207207 // and MaxCallFrameSize variables.
208 if (!F->hasFnAttribute(Attribute::Naked))
208 if (!F.hasFnAttribute(Attribute::Naked))
209209 insertPrologEpilogCode(Fn);
210210
211211 // Replace all MO_FrameIndex operands with physical register references
223223 MachineFrameInfo &MFI = Fn.getFrameInfo();
224224 uint64_t StackSize = MFI.getStackSize();
225225 if (WarnStackSize.getNumOccurrences() > 0 && WarnStackSize < StackSize) {
226 DiagnosticInfoStackSize DiagStackSize(*F, StackSize);
227 F->getContext().diagnose(DiagStackSize);
226 DiagnosticInfoStackSize DiagStackSize(F, StackSize);
227 F.getContext().diagnose(DiagStackSize);
228228 }
229229
230230 delete RS;
507507 assert(Fn.getProperties().hasProperty(
508508 MachineFunctionProperties::Property::NoVRegs));
509509
510 const Function *F = Fn.getFunction();
510 const Function &F = Fn.getFunction();
511511 const TargetFrameLowering *TFI = Fn.getSubtarget().getFrameLowering();
512512 MachineFrameInfo &MFI = Fn.getFrameInfo();
513513 MinCSFrameIndex = std::numeric_limits::max();
521521 assignCalleeSavedSpillSlots(Fn, SavedRegs, MinCSFrameIndex, MaxCSFrameIndex);
522522
523523 // Add the code to save and restore the callee saved registers.
524 if (!F->hasFnAttribute(Attribute::Naked)) {
524 if (!F.hasFnAttribute(Attribute::Naked)) {
525525 MFI.setCalleeSavedInfoValid(true);
526526
527527 std::vector &CSI = MFI.getCalleeSavedInfo();
951951
952952 ORE->emit([&]() {
953953 return MachineOptimizationRemarkAnalysis(DEBUG_TYPE, "StackSize",
954 Fn.getFunction()->getSubprogram(),
954 Fn.getFunction().getSubprogram(),
955955 &Fn.front())
956956 << ore::NV("NumStackBytes", StackSize) << " stack bytes in function";
957957 });
992992 // approach is rather similar to that of Segmented Stacks, but it uses a
993993 // different conditional check and another BIF for allocating more stack
994994 // space.
995 if (Fn.getFunction()->getCallingConv() == CallingConv::HiPE)
995 if (Fn.getFunction().getCallingConv() == CallingConv::HiPE)
996996 for (MachineBasicBlock *SaveBlock : SaveBlocks)
997997 TFI.adjustForHiPEPrologue(Fn, *SaveBlock);
998998 }
26412641 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
26422642 SmallVectorImpl &NewVRegs) {
26432643 CutOffInfo = CO_None;
2644 LLVMContext &Ctx = MF->getFunction()->getContext();
2644 LLVMContext &Ctx = MF->getFunction().getContext();
26452645 SmallVirtRegSet FixedRegisters;
26462646 unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
26472647 if (Reg == ~0U && (CutOffInfo != CO_None)) {
798798 findVRegIntervalsToAlloc(MF, LIS);
799799
800800 #ifndef NDEBUG
801 const Function &F = *MF.getFunction();
801 const Function &F = MF.getFunction();
802802 std::string FullyQualifiedName =
803803 F.getParent()->getModuleIdentifier() + "." + F.getName().str();
804804 #endif
9494 unsigned RegMaskSize = (TRI->getNumRegs() + 31) / 32;
9595 RegMask.resize(RegMaskSize, 0xFFFFFFFF);
9696
97 const Function *F = MF.getFunction();
97 const Function &F = MF.getFunction();
9898
9999 PhysicalRegisterUsageInfo *PRUI = &getAnalysis();
100100
126126
127127 if (!TargetFrameLowering::isSafeForNoCSROpt(F)) {
128128 const uint32_t *CallPreservedMask =
129 TRI->getCallPreservedMask(MF, F->getCallingConv());
129 TRI->getCallPreservedMask(MF, F.getCallingConv());
130130 if (CallPreservedMask) {
131131 // Set callee saved register as preserved.
132132 for (unsigned i = 0; i < RegMaskSize; ++i)
144144
145145 DEBUG(dbgs() << " \n----------------------------------------\n");
146146
147 PRUI->storeUpdateRegUsageInfo(F, std::move(RegMask));
147 PRUI->storeUpdateRegUsageInfo(&F, std::move(RegMask));
148148
149149 return false;
150150 }
101101 }
102102
103103 bool RegUsageInfoPropagationPass::runOnMachineFunction(MachineFunction &MF) {
104 const Module *M = MF.getFunction()->getParent();
104 const Module *M = MF.getFunction().getParent();
105105 PhysicalRegisterUsageInfo *PRUI = &getAnalysis();
106106
107107 DEBUG(dbgs() << " ++++++++++++++++++++ " << getPassName()
5050 ++NumFunctionsReset;
5151 MF.reset();
5252 if (EmitFallbackDiag) {
53 const Function &F = *MF.getFunction();
53 const Function &F = MF.getFunction();
5454 DiagnosticInfoISelFallback DiagFallback(F);
5555 F.getContext().diagnose(DiagFallback);
5656 }
113113 : ScheduleDAG(mf), MLI(mli), MFI(mf.getFrameInfo()),
114114 RemoveKillFlags(RemoveKillFlags),
115115 UnknownValue(UndefValue::get(
116 Type::getVoidTy(mf.getFunction()->getContext()))) {
116 Type::getVoidTy(mf.getFunction().getContext()))) {
117117 DbgValues.clear();
118118
119119 const TargetSubtargetInfo &ST = mf.getSubtarget();
160160 DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL)
161161 : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
162162 OptLevel(OL), AA(AA) {
163 ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
163 ForCodeSize = DAG.getMachineFunction().getFunction().optForSize();
164164
165165 MaximumLegalStoreInBits = 0;
166166 for (MVT VT : MVT::all_valuetypes())
29322932 // If integer divide is expensive and we satisfy the requirements, emit an
29332933 // alternate sequence. Targets may check function attributes for size/speed
29342934 // trade-offs.
2935 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2935 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
29362936 if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
29372937 if (SDValue Op = BuildSDIV(N))
29382938 return Op;
30033003 }
30043004
30053005 // fold (udiv x, c) -> alternate
3006 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
3006 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
30073007 if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr))
30083008 if (SDValue Op = BuildUDIV(N))
30093009 return Op;
30623062 }
30633063 }
30643064
3065 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
3065 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
30663066
30673067 // If X/C can be simplified by the division-by-constant logic, lower
30683068 // X%C to the equivalent of X-X/C*C.
1293912939 if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits)
1294012940 return false;
1294112941
12942 bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
12942 bool NoVectors = DAG.getMachineFunction().getFunction().hasFnAttribute(
1294312943 Attribute::NoImplicitFloat);
1294412944
1294512945 // This function cannot currently deal with non-byte-sized memory sizes.
1698516985 SDValue DAGCombiner::BuildSDIV(SDNode *N) {
1698616986 // when optimising for minimum size, we don't want to expand a div to a mul
1698716987 // and a shift.
16988 if (DAG.getMachineFunction().getFunction()->optForMinSize())
16988 if (DAG.getMachineFunction().getFunction().optForMinSize())
1698916989 return SDValue();
1699016990
1699116991 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
1703117031 SDValue DAGCombiner::BuildUDIV(SDNode *N) {
1703217032 // when optimising for minimum size, we don't want to expand a div to a mul
1703317033 // and a shift.
17034 if (DAG.getMachineFunction().getFunction()->optForMinSize())
17034 if (DAG.getMachineFunction().getFunction().optForMinSize())
1703517035 return SDValue();
1703617036
1703717037 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1));
20132013 // isTailCall may be true since the callee does not reference caller stack
20142014 // frame. Check if it's in the right position and that the return types match.
20152015 SDValue TCChain = InChain;
2016 const Function *F = DAG.getMachineFunction().getFunction();
2016 const Function &F = DAG.getMachineFunction().getFunction();
20172017 bool isTailCall =
20182018 TLI.isInTailCallPosition(DAG, Node, TCChain) &&
2019 (RetTy == F->getReturnType() || F->getReturnType()->isVoidTy());
2019 (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());
20202020 if (isTailCall)
20212021 InChain = TCChain;
20222022
908908 ORE = &NewORE;
909909 TLI = getSubtarget().getTargetLowering();
910910 TSI = getSubtarget().getSelectionDAGInfo();
911 Context = &MF->getFunction()->getContext();
911 Context = &MF->getFunction().getContext();
912912 }
913913
914914 SelectionDAG::~SelectionDAG() {
13301330 assert((TargetFlags == 0 || isTarget) &&
13311331 "Cannot set target flags on target-independent globals");
13321332 if (Alignment == 0)
1333 Alignment = MF->getFunction()->optForSize()
1333 Alignment = MF->getFunction().optForSize()
13341334 ? getDataLayout().getABITypeAlignment(C->getType())
13351335 : getDataLayout().getPrefTypeAlignment(C->getType());
13361336 unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
50995099 // On Darwin, -Os means optimize for size without hurting performance, so
51005100 // only really optimize for size when -Oz (MinSize) is used.
51015101 if (MF.getTarget().getTargetTriple().isOSDarwin())
5102 return MF.getFunction()->optForMinSize();
5103 return MF.getFunction()->optForSize();
5102 return MF.getFunction().optForMinSize();
5103 return MF.getFunction().optForSize();
51045104 }
51055105
51065106 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
15721572 EVT(TLI.getPointerTy(DL))));
15731573 }
15741574
1575 bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
1575 bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
15761576 CallingConv::ID CallConv =
1577 DAG.getMachineFunction().getFunction()->getCallingConv();
1577 DAG.getMachineFunction().getFunction().getCallingConv();
15781578 Chain = DAG.getTargetLoweringInfo().LowerReturn(
15791579 Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
15801580
21092109 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21102110 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
21112111 MachineFunction &MF = DAG.getMachineFunction();
2112 Value *Global = TLI.getSDagStackGuard(*MF.getFunction()->getParent());
2112 Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
21132113 MachineSDNode *Node =
21142114 DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
21152115 if (Global) {
21432143 SDValue Guard;
21442144 SDLoc dl = getCurSDLoc();
21452145 SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2146 const Module &M = *ParentBB->getParent()->getFunction()->getParent();
2146 const Module &M = *ParentBB->getParent()->getFunction().getParent();
21472147 unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
21482148
21492149 // Generate code to load the content of the guard slot.
47654765 if (Val == 0)
47664766 return DAG.getConstantFP(1.0, DL, LHS.getValueType());
47674767
4768 const Function *F = DAG.getMachineFunction().getFunction();
4769 if (!F->optForSize() ||
4768 const Function &F = DAG.getMachineFunction().getFunction();
4769 if (!F.optForSize() ||
47704770 // If optimizing for size, don't insert too many multiplies.
47714771 // This inserts up to 5 multiplies.
47724772 countPopulation(Val) + Log2_32(Val) < 7) {
56395639 case Intrinsic::stackguard: {
56405640 EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
56415641 MachineFunction &MF = DAG.getMachineFunction();
5642 const Module &M = *MF.getFunction()->getParent();
5642 const Module &M = *MF.getFunction().getParent();
56435643 SDValue Chain = getRoot();
56445644 if (TLI.useLoadStackGuardNode()) {
56455645 Res = getLoadStackGuard(DAG, sdl, Chain);
57475747 return nullptr;
57485748 case Intrinsic::gcroot: {
57495749 MachineFunction &MF = DAG.getMachineFunction();
5750 const Function *F = MF.getFunction();
5751 (void)F;
5752 assert(F->hasGC() &&
5750 assert(MF.getFunction().hasGC() &&
57535751 "only valid in functions with gc specified, enforced by Verifier");
57545752 assert(GFI && "implied by previous");
57555753 const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
98689866 // Don't perform if there is only one cluster or optimizing for size.
98699867 if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
98709868 TM.getOptLevel() == CodeGenOpt::None ||
9871 SwitchMBB->getParent()->getFunction()->optForMinSize())
9869 SwitchMBB->getParent()->getFunction().optForMinSize())
98729870 return SwitchMBB;
98739871
98749872 BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
1002010018 unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
1002110019
1002210020 if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
10023 !DefaultMBB->getParent()->getFunction()->optForMinSize()) {
10021 !DefaultMBB->getParent()->getFunction().optForMinSize()) {
1002410022 // For optimized builds, lower large range as a balanced binary tree.
1002510023 splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
1002610024 continue;
211211 IS.OptLevel = NewOptLevel;
212212 IS.TM.setOptLevel(NewOptLevel);
213213 DEBUG(dbgs() << "\nChanging optimization level for Function "
214 << IS.MF->getFunction()->getName() << "\n");
214 << IS.MF->getFunction().getName() << "\n");
215215 DEBUG(dbgs() << "\tBefore: -O" << SavedOptLevel
216216 << " ; After: -O" << NewOptLevel << "\n");
217217 SavedFastISel = IS.TM.Options.EnableFastISel;
227227 if (IS.OptLevel == SavedOptLevel)
228228 return;
229229 DEBUG(dbgs() << "\nRestoring optimization level for Function "
230 << IS.MF->getFunction()->getName() << "\n");
230 << IS.MF->getFunction().getName() << "\n");
231231 DEBUG(dbgs() << "\tBefore: -O" << IS.OptLevel
232232 << " ; After: -O" << SavedOptLevel << "\n");
233233 IS.OptLevel = SavedOptLevel;
383383 assert((!EnableFastISelAbort || TM.Options.EnableFastISel) &&
384384 "-fast-isel-abort > 0 requires -fast-isel");
385385
386 const Function &Fn = *mf.getFunction();
386 const Function &Fn = mf.getFunction();
387387 MF = &mf;
388388
389389 // Reset the target options before resetting the optimization
5151 /// so, it sets Chain to the input chain of the tail call.
5252 bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
5353 SDValue &Chain) const {
54 const Function *F = DAG.getMachineFunction().getFunction();
54 const Function &F = DAG.getMachineFunction().getFunction();
5555
5656 // Conservatively require the attributes of the call to match those of
5757 // the return. Ignore noalias because it doesn't affect the call sequence.
58 AttributeList CallerAttrs = F->getAttributes();
58 AttributeList CallerAttrs = F.getAttributes();
5959 if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
6060 .removeAttribute(Attribute::NoAlias)
6161 .hasAttributes())
29622962 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
29632963 SelectionDAG &DAG,
29642964 std::vector *Created) const {
2965 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
2965 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
29662966 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29672967 if (TLI.isIntDivCheap(N->getValueType(0), Attr))
29682968 return SDValue(N,0); // Lower SDIV as SDIV
448448 }
449449
450450 bool ShrinkWrap::runOnMachineFunction(MachineFunction &MF) {
451 if (skipFunction(*MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
451 if (skipFunction(MF.getFunction()) || MF.empty() || !isShrinkWrapEnabled(MF))
452452 return false;
453453
454454 DEBUG(dbgs() << "**** Analysing " << MF.getName() << '\n');
568568 // of the crash. Since a crash can happen anywhere, the
569569 // frame must be lowered before anything else happen for the
570570 // sanitizers to be able to get a correct stack frame.
571 !(MF.getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
572 MF.getFunction()->hasFnAttribute(Attribute::SanitizeThread) ||
573 MF.getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
574 MF.getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress));
571 !(MF.getFunction().hasFnAttribute(Attribute::SanitizeAddress) ||
572 MF.getFunction().hasFnAttribute(Attribute::SanitizeThread) ||
573 MF.getFunction().hasFnAttribute(Attribute::SanitizeMemory) ||
574 MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress));
575575 // If EnableShrinkWrap is set, it takes precedence on whatever the
576576 // target sets. The rational is that we assume we want to test
577577 // something related to shrink-wrapping.
11281128
11291129 bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
11301130 DEBUG(dbgs() << "********** Stack Coloring **********\n"
1131 << "********** Function: "
1132 << ((const Value*)Func.getFunction())->getName() << '\n');
1131 << "********** Function: " << Func.getName() << '\n');
11331132 MF = &Func;
11341133 MFI = &MF->getFrameInfo();
11351134 Indexes = &getAnalysis();
11691168 // Don't continue because there are not enough lifetime markers, or the
11701169 // stack is too small, or we are told not to optimize the slots.
11711170 if (NumMarkers < 2 || TotalSize < 16 || DisableColoring ||
1172 skipFunction(*Func.getFunction())) {
1171 skipFunction(Func.getFunction())) {
11731172 DEBUG(dbgs()<<"Will not try to merge slots.\n");
11741173 return removeAllMarkers();
11751174 }
4848 INITIALIZE_PASS(TailDuplicatePass, DEBUG_TYPE, "Tail Duplication", false, false)
4949
5050 bool TailDuplicatePass::runOnMachineFunction(MachineFunction &MF) {
51 if (skipFunction(*MF.getFunction()))
51 if (skipFunction(MF.getFunction()))
5252 return false;
5353
5454 auto MBPI = &getAnalysis();
549549 unsigned MaxDuplicateCount;
550550 if (TailDupSize == 0 &&
551551 TailDuplicateSize.getNumOccurrences() == 0 &&
552 MF->getFunction()->optForSize())
552 MF->getFunction().optForSize())
553553 MaxDuplicateCount = 1;
554554 else if (TailDupSize == 0)
555555 MaxDuplicateCount = TailDuplicateSize;
3131
3232 /// The default implementation just looks at attribute "no-frame-pointer-elim".
3333 bool TargetFrameLowering::noFramePointerElim(const MachineFunction &MF) const {
34 auto Attr = MF.getFunction()->getFnAttribute("no-frame-pointer-elim");
34 auto Attr = MF.getFunction().getFnAttribute("no-frame-pointer-elim");
3535 return Attr.getValueAsString() == "true";
3636 }
3737
8181 return;
8282
8383 // In Naked functions we aren't going to save any registers.
84 if (MF.getFunction()->hasFnAttribute(Attribute::Naked))
84 if (MF.getFunction().hasFnAttribute(Attribute::Naked))
8585 return;
8686
8787 // Functions which call __builtin_unwind_init get all their registers saved.
9898 const MachineFunction &MF) const {
9999 // When HHVM function is called, the stack is skewed as the return address
100100 // is removed from the stack before we enter the function.
101 if (LLVM_UNLIKELY(MF.getFunction()->getCallingConv() == CallingConv::HHVM))
101 if (LLVM_UNLIKELY(MF.getFunction().getCallingConv() == CallingConv::HHVM))
102102 return MF.getTarget().getPointerSize();
103103
104104 return 0;
15911591 /// Get the reciprocal estimate attribute string for a function that will
15921592 /// override the target defaults.
15931593 static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
1594 const Function *F = MF.getFunction();
1595 return F->getFnAttribute("reciprocal-estimates").getValueAsString();
1594 const Function &F = MF.getFunction();
1595 return F.getFnAttribute("reciprocal-estimates").getValueAsString();
15961596 }
15971597
15981598 /// Construct a string for the given reciprocal operation of the given type.
2727 return true;
2828
2929 // Check to see if we should eliminate non-leaf frame pointers.
30 if (MF.getFunction()->hasFnAttribute("no-frame-pointer-elim-non-leaf"))
30 if (MF.getFunction().hasFnAttribute("no-frame-pointer-elim-non-leaf"))
3131 return MF.getFrameInfo().hasCalls();
3232
3333 return false;
421421 }
422422
423423 bool TargetRegisterInfo::canRealignStack(const MachineFunction &MF) const {
424 return !MF.getFunction()->hasFnAttribute("no-realign-stack");
424 return !MF.getFunction().hasFnAttribute("no-realign-stack");
425425 }
426426
427427 bool TargetRegisterInfo::needsStackRealignment(
428428 const MachineFunction &MF) const {
429429 const MachineFrameInfo &MFI = MF.getFrameInfo();
430430 const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
431 const Function *F = MF.getFunction();
431 const Function &F = MF.getFunction();
432432 unsigned StackAlign = TFI->getStackAlignment();
433433 bool requiresRealignment = ((MFI.getMaxAlignment() > StackAlign) ||
434 F->hasFnAttribute(Attribute::StackAlignment));
435 if (MF.getFunction()->hasFnAttribute("stackrealign") || requiresRealignment) {
434 F.hasFnAttribute(Attribute::StackAlignment));
435 if (F.hasFnAttribute("stackrealign") || requiresRealignment) {
436436 if (canRealignStack(MF))
437437 return true;
438 DEBUG(dbgs() << "Can't realign function's stack: " << F->getName() << "\n");
438 DEBUG(dbgs() << "Can't realign function's stack: " << F.getName() << "\n");
439439 }
440440 return false;
441441 }
16621662 OptLevel = TM.getOptLevel();
16631663 // Disable optimizations if requested. We cannot skip the whole pass as some
16641664 // fixups are necessary for correctness.
1665 if (skipFunction(*Func.getFunction()))
1665 if (skipFunction(Func.getFunction()))
16661666 OptLevel = CodeGenOpt::None;
16671667
16681668 bool MadeChange = false;
141141 }
142142
143143 bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
144 auto &F = *MF.getFunction();
144 auto &F = MF.getFunction();
145145 auto InstrAttr = F.getFnAttribute("function-instrument");
146146 bool AlwaysInstrument = !InstrAttr.hasAttribute(Attribute::None) &&
147147 InstrAttr.isStringAttribute() &&
307307 //===----------------------------------------------------------------------===//
308308
309309 bool AArch64A57FPLoadBalancing::runOnMachineFunction(MachineFunction &F) {
310 if (skipFunction(*F.getFunction()))
310 if (skipFunction(F.getFunction()))
311311 return false;
312312
313313 if (!F.getSubtarget().balanceFPOps())
392392 bool Changed = false;
393393 DEBUG(dbgs() << "***** AArch64AdvSIMDScalar *****\n");
394394
395 if (skipFunction(*mf.getFunction()))
395 if (skipFunction(mf.getFunction()))
396396 return false;
397397
398398 MRI = &mf.getRegInfo();
219219 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
220220 const Value *Val, unsigned VReg) const {
221221 MachineFunction &MF = MIRBuilder.getMF();
222 const Function &F = *MF.getFunction();
222 const Function &F = MF.getFunction();
223223
224224 auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
225225 assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
321321 const ArgInfo &OrigRet,
322322 ArrayRef OrigArgs) const {
323323 MachineFunction &MF = MIRBuilder.getMF();
324 const Function &F = *MF.getFunction();
324 const Function &F = MF.getFunction();
325325 MachineRegisterInfo &MRI = MF.getRegInfo();
326326 auto &DL = F.getParent()->getDataLayout();
327327
4141 }
4242
4343 bool runOnMachineFunction(MachineFunction &MF) override {
44 if (skipFunction(*MF.getFunction()))
44 if (skipFunction(MF.getFunction()))
4545 return false;
4646
4747 AArch64FunctionInfo *AFI = MF.getInfo();
481481 }
482482
483483 bool AArch64CollectLOH::runOnMachineFunction(MachineFunction &MF) {
484 if (skipFunction(*MF.getFunction()))
484 if (skipFunction(MF.getFunction()))
485485 return false;
486486
487487 DEBUG(dbgs() << "********** AArch64 Collect LOH **********\n"
289289 }
290290
291291 bool AArch64CondBrTuning::runOnMachineFunction(MachineFunction &MF) {
292 if (skipFunction(*MF.getFunction()))
292 if (skipFunction(MF.getFunction()))
293293 return false;
294294
295295 DEBUG(dbgs() << "********** AArch64 Conditional Branch Tuning **********\n"
326326 bool AArch64ConditionOptimizer::runOnMachineFunction(MachineFunction &MF) {
327327 DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
328328 << "********** Function: " << MF.getName() << '\n');
329 if (skipFunction(*MF.getFunction()))
329 if (skipFunction(MF.getFunction()))
330330 return false;
331331
332332 TII = MF.getSubtarget().getInstrInfo();
923923 bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) {
924924 DEBUG(dbgs() << "********** AArch64 Conditional Compares **********\n"
925925 << "********** Function: " << MF.getName() << '\n');
926 if (skipFunction(*MF.getFunction()))
926 if (skipFunction(MF.getFunction()))
927927 return false;
928928
929929 TII = MF.getSubtarget().getInstrInfo();
935935 MBPI = &getAnalysis();
936936 Traces = &getAnalysis();
937937 MinInstr = nullptr;
938 MinSize = MF.getFunction()->optForMinSize();
938 MinSize = MF.getFunction().optForMinSize();
939939
940940 bool Changed = false;
941941 CmpConv.runOnMachineFunction(MF, MBPI);
197197 // Scan the function for instructions that have a dead definition of a
198198 // register. Replace that register with the zero register when possible.
199199 bool AArch64DeadRegisterDefinitions::runOnMachineFunction(MachineFunction &MF) {
200 if (skipFunction(*MF.getFunction()))
200 if (skipFunction(MF.getFunction()))
201201 return false;
202202
203203 TRI = MF.getSubtarget().getRegisterInfo();
797797 if (ST.getProcFamily() != AArch64Subtarget::Falkor)
798798 return false;
799799
800 if (skipFunction(*Fn.getFunction()))
800 if (skipFunction(Fn.getFunction()))
801801 return false;
802802
803803 TII = static_cast(ST.getInstrInfo());
173173 return false;
174174 // Don't use the red zone if the function explicitly asks us not to.
175175 // This is typically used for kernel code.
176 if (MF.getFunction()->hasFnAttribute(Attribute::NoRedZone))
176 if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
177177 return false;
178178
179179 const MachineFrameInfo &MFI = MF.getFrameInfo();
458458 MachineBasicBlock &MBB) const {
459459 MachineBasicBlock::iterator MBBI = MBB.begin();
460460 const MachineFrameInfo &MFI = MF.getFrameInfo();
461 const Function *Fn = MF.getFunction();
461 const Function &F = MF.getFunction();
462462 const AArch64Subtarget &Subtarget = MF.getSubtarget();
463463 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
464464 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
465465 MachineModuleInfo &MMI = MF.getMMI();
466466 AArch64FunctionInfo *AFI = MF.getInfo();
467 bool needsFrameMoves = MMI.hasDebugInfo() || Fn->needsUnwindTableEntry();
467 bool needsFrameMoves = MMI.hasDebugInfo() || F.needsUnwindTableEntry();
468468 bool HasFP = hasFP(MF);
469469
470470 // Debug location must be unknown since the first debug location is used
473473
474474 // All calls are tail calls in GHC calling conv, and functions have no
475475 // prologue/epilogue.
476 if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
476 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
477477 return;
478478
479479 int NumBytes = (int)MFI.getStackSize();
506506 }
507507
508508 bool IsWin64 =
509 Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
509 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
510510 unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
511511
512512 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
715715
716716 // All calls are tail calls in GHC calling conv, and functions have no
717717 // prologue/epilogue.
718 if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
718 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
719719 return;
720720
721721 // Initial and residual are named for consistency with the prologue. Note that
764764 // it as the 2nd argument of AArch64ISD::TC_RETURN.
765765
766766 bool IsWin64 =
767 Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
767 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
768768 unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
769769
770770 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
856856 const AArch64FunctionInfo *AFI = MF.getInfo();
857857 const AArch64Subtarget &Subtarget = MF.getSubtarget();
858858 bool IsWin64 =
859 Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv());
859 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
860860 unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
861861 int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16;
862862 int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize();
927927
928928 static bool produceCompactUnwindFrame(MachineFunction &MF) {
929929 const AArch64Subtarget &Subtarget = MF.getSubtarget();
930 AttributeList Attrs = MF.getFunction()->getAttributes();
930 AttributeList Attrs = MF.getFunction().getAttributes();
931931 return Subtarget.isTargetMachO() &&
932932 !(Subtarget.getTargetLowering()->supportSwiftError() &&
933933 Attrs.hasAttrSomewhere(Attribute::SwiftError));
958958
959959 AArch64FunctionInfo *AFI = MF.getInfo();
960960 MachineFrameInfo &MFI = MF.getFrameInfo();
961 CallingConv::ID CC = MF.getFunction()->getCallingConv();
961 CallingConv::ID CC = MF.getFunction().getCallingConv();
962962 unsigned Count = CSI.size();
963963 (void)CC;
964964 // MachO's compact unwind format relies on all registers being stored in
11531153 RegScavenger *RS) const {
11541154 // All calls are tail calls in GHC calling conv, and functions have no
11551155 // prologue/epilogue.
1156 if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
1156 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
11571157 return;
11581158
11591159 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
5252 }
5353
5454 bool runOnMachineFunction(MachineFunction &MF) override {
55 ForCodeSize = MF.getFunction()->optForSize();
55 ForCodeSize = MF.getFunction().optForSize();
5656 Subtarget = &MF.getSubtarget();
5757 return SelectionDAGISel::runOnMachineFunction(MF);
5858 }
27302730 SelectionDAG &DAG, SmallVectorImpl &InVals) const {
27312731 MachineFunction &MF = DAG.getMachineFunction();
27322732 MachineFrameInfo &MFI = MF.getFrameInfo();
2733 bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
2733 bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
27342734
27352735 // Assign locations to all of the incoming arguments.
27362736 SmallVector ArgLocs;
27442744 // we use a special version of AnalyzeFormalArguments to pass in ValVT and
27452745 // LocVT.
27462746 unsigned NumArgs = Ins.size();
2747 Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
2747 Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
27482748 unsigned CurArgIdx = 0;
27492749 for (unsigned i = 0; i != NumArgs; ++i) {
27502750 MVT ValVT = Ins[i].VT;
29342934 MachineFrameInfo &MFI = MF.getFrameInfo();
29352935 AArch64FunctionInfo *FuncInfo = MF.getInfo();
29362936 auto PtrVT = getPointerTy(DAG.getDataLayout());
2937 bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
2937 bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());
29382938
29392939 SmallVector MemOps;
29402940
30863086 return false;
30873087
30883088 MachineFunction &MF = DAG.getMachineFunction();
3089 const Function *CallerF = MF.getFunction();
3090 CallingConv::ID CallerCC = CallerF->getCallingConv();
3089 const Function &CallerF = MF.getFunction();
3090 CallingConv::ID CallerCC = CallerF.getCallingConv();
30913091 bool CCMatch = CallerCC == CalleeCC;
30923092
30933093 // Byval parameters hand the function a pointer directly into the stack area
30943094 // we want to reuse during a tail call. Working around this *is* possible (see
30953095 // X86) but less efficient and uglier in LowerCall.
3096 for (Function::const_arg_iterator i = CallerF->arg_begin(),
3097 e = CallerF->arg_end();
3096 for (Function::const_arg_iterator i = CallerF.arg_begin(),
3097 e = CallerF.arg_end();
30983098 i != e; ++i)
30993099 if (i->hasByValAttr())
31003100 return false;
41844184 }
41854185
41864186 SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
4187 if (DAG.getMachineFunction().getFunction()->hasFnAttribute(
4187 if (DAG.getMachineFunction().getFunction().hasFnAttribute(
41884188 Attribute::NoImplicitFloat))
41894189 return SDValue();
41904190
46674667 SelectionDAG &DAG) const {
46684668 MachineFunction &MF = DAG.getMachineFunction();
46694669
4670 if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
4670 if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
46714671 return LowerWin64_VASTART(Op, DAG);
46724672 else if (Subtarget->isTargetDarwin())
46734673 return LowerDarwin_VASTART(Op, DAG);
79087908 // instruction to materialize the v2i64 zero and one store (with restrictive
79097909 // addressing mode). Just do two i64 store of zero-registers.
79107910 bool Fast;
7911 const Function *F = MF.getFunction();
7911 const Function &F = MF.getFunction();
79127912 if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
7913 !F->hasFnAttribute(Attribute::NoImplicitFloat) &&
7913 !F.hasFnAttribute(Attribute::NoImplicitFloat) &&
79147914 (memOpAlign(SrcAlign, DstAlign, 16) ||
79157915 (allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast)))
79167916 return MVT::f128;
81558155 AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
81568156 SelectionDAG &DAG,
81578157 std::vector *Created) const {
8158 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
8158 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
81598159 if (isIntDivCheap(N->getValueType(0), Attr))
81608160 return SDValue(N,0); // Lower SDIV as SDIV
81618161
95769576 return SDValue();
95779577
95789578 // Don't split at -Oz.
9579 if (DAG.getMachineFunction().getFunction()->optForMinSize())
9579 if (DAG.getMachineFunction().getFunction().optForMinSize())
95809580 return SDValue();
95819581
95829582 // Don't split v2i64 vectors. Memcpy lowering produces those and splitting
1093810938 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
1093910939 // nounwind. If we want to generalize this later, we may need to emit
1094010940 // CFI pseudo-instructions.
10941 assert(Entry->getParent()->getFunction()->hasFnAttribute(
10941 assert(Entry->getParent()->getFunction().hasFnAttribute(
1094210942 Attribute::NoUnwind) &&
1094310943 "Function should be nounwind in insertCopiesSplitCSR!");
1094410944 Entry->addLiveIn(*I);
414414 // Do not merge to float value size (128 bytes) if no implicit
415415 // float attribute is set.
416416
417 bool NoFloat = DAG.getMachineFunction().getFunction()->hasFnAttribute(
417 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
418418 Attribute::NoImplicitFloat);
419419
420420 if (NoFloat)
443443 }
444444
445445 bool supportSplitCSR(MachineFunction *MF) const override {
446 return MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
447 MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
446 return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
447 MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
448448 }
449449 void initializeSplitCSR(MachineBasicBlock *Entry) const override;
450450 void insertCopiesSplitCSR(
47524752
47534753 bool AArch64InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
47544754 bool OutlineFromLinkOnceODRs) const {
4755 const Function *F = MF.getFunction();
4755 const Function &F = MF.getFunction();
47564756
47574757 // If F uses a redzone, then don't outline from it because it might mess up
47584758 // the stack.
4759 if (!F->hasFnAttribute(Attribute::NoRedZone))
4759 if (!F.hasFnAttribute(Attribute::NoRedZone))
47604760 return false;
47614761
47624762 // If anyone is using the address of this function, don't outline from it.
4763 if (F->hasAddressTaken())
4763 if (F.hasAddressTaken())
47644764 return false;
47654765
47664766 // Can F be deduplicated by the linker? If it can, don't outline from it.
4767 if (!OutlineFromLinkOnceODRs && F->hasLinkOnceODRLinkage())
4768 return false;
4769
4767 if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
4768 return false;
4769
47704770 return true;
47714771 }
47724772
327327 // the Function object through the Subtarget and objections were raised
328328 // to that (see post-commit review comments for r301750).
329329 let RecomputePerFunction = 1 in {
330 def ForCodeSize : Predicate<"MF->getFunction()->optForSize()">;
331 def NotForCodeSize : Predicate<"!MF->getFunction()->optForSize()">;
330 def ForCodeSize : Predicate<"MF->getFunction().optForSize()">;
331 def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">;
332332 // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
333 def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction()->optForSize()">;
333 def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">;
334334 }
335335
336336 include "AArch64InstrFormats.td"
17581758 }
17591759
17601760 bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
1761 if (skipFunction(*Fn.getFunction()))
1761 if (skipFunction(Fn.getFunction()))
17621762 return false;
17631763
17641764 Subtarget = &static_cast(Fn.getSubtarget());
484484
485485 bool AArch64RedundantCopyElimination::runOnMachineFunction(
486486 MachineFunction &MF) {
487 if (skipFunction(*MF.getFunction()))
487 if (skipFunction(MF.getFunction()))
488488 return false;
489489 TRI = MF.getSubtarget().getRegisterInfo();
490490 MRI = &MF.getRegInfo();
4141 const MCPhysReg *
4242 AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
4343 assert(MF && "Invalid MachineFunction pointer.");
44 if (MF->getFunction()->getCallingConv() == CallingConv::GHC)
44 if (MF->getFunction().getCallingConv() == CallingConv::GHC)
4545 // GHC set of callee saved regs is empty as all those regs are
4646 // used for passing STG regs around
4747 return CSR_AArch64_NoRegs_SaveList;
48 if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg)
48 if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
4949 return CSR_AArch64_AllRegs_SaveList;
50 if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS)
50 if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS)
5151 return MF->getInfo()->isSplitCSR() ?
5252 CSR_AArch64_CXX_TLS_Darwin_PE_SaveList :
5353 CSR_AArch64_CXX_TLS_Darwin_SaveList;
5454 if (MF->getSubtarget().getTargetLowering()
5555 ->supportSwiftError() &&
56 MF->getFunction()->getAttributes().hasAttrSomewhere(
56 MF->getFunction().getAttributes().hasAttrSomewhere(
5757 Attribute::SwiftError))
5858 return CSR_AArch64_AAPCS_SwiftError_SaveList;
59 if (MF->getFunction()->getCallingConv() == CallingConv::PreserveMost)
59 if (MF->getFunction().getCallingConv() == CallingConv::PreserveMost)
6060 return CSR_AArch64_RT_MostRegs_SaveList;
6161 else
6262 return CSR_AArch64_AAPCS_SaveList;
6565 const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
6666 const MachineFunction *MF) const {
6767 assert(MF && "Invalid MachineFunction pointer.");
68 if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
68 if (MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
6969 MF->getInfo()->isSplitCSR())
7070 return CSR_AArch64_CXX_TLS_Darwin_ViaCopy_SaveList;
7171 return nullptr;
8383 return CSR_AArch64_CXX_TLS_Darwin_RegMask;
8484 if (MF.getSubtarget().getTargetLowering()
8585 ->supportSwiftError() &&
86 MF.getFunction()->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
86 MF.getFunction().getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8787 return CSR_AArch64_AAPCS_SwiftError_RegMask;
8888 if (CC == CallingConv::PreserveMost)
8989 return CSR_AArch64_RT_MostRegs_RegMask;
689689 }
690690
691691 bool AArch64SIMDInstrOpt::runOnMachineFunction(MachineFunction &MF) {
692 if (skipFunction(*MF.getFunction()))
692 if (skipFunction(MF.getFunction()))
693693 return false;
694694
695695 TII = MF.getSubtarget().getInstrInfo();
119119 }
120120
121121 bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
122 if (skipFunction(*MF.getFunction()))
122 if (skipFunction(MF.getFunction()))
123123 return false;
124124
125125 const TargetSubtargetInfo &ST = MF.getSubtarget();
204204 if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
205205 return;
206206
207 HSAMetadataStream.emitKernel(*MF->getFunction(),
207 HSAMetadataStream.emitKernel(MF->getFunction(),
208208 getHSACodeProps(*MF, CurrentProgramInfo),
209209 getHSADebugProps(*MF, CurrentProgramInfo));
210210 }
214214 const AMDGPUSubtarget &STM = MF->getSubtarget();
215215 if (MFI->isEntryFunction() && STM.isAmdCodeObjectV2(*MF)) {
216216 SmallString<128> SymbolName;
217 getNameWithPrefix(SymbolName, MF->getFunction()),
217 getNameWithPrefix(SymbolName, &MF->getFunction()),
218218 getTargetStreamer()->EmitAMDGPUSymbolType(
219219 SymbolName, ELF::STT_AMDGPU_HSA_KERNEL);
220220 }
221221 const AMDGPUSubtarget &STI = MF->getSubtarget();
222222 if (STI.dumpCode()) {
223223 // Disassemble function name label to text.
224 DisasmLines.push_back(MF->getFunction()->getName().str() + ":");
224 DisasmLines.push_back(MF->getName().str() + ":");
225225 DisasmLineMaxLen = std::max(DisasmLineMaxLen, DisasmLines.back().size());
226226 HexLines.push_back("");
227227 }
313313 getSIProgramInfo(CurrentProgramInfo, MF);
314314 } else {
315315 auto I = CallGraphResourceInfo.insert(
316 std::make_pair(MF.getFunction(), SIFunctionResourceInfo()));
316 std::make_pair(&MF.getFunction(), SIFunctionResourceInfo()));
317317 SIFunctionResourceInfo &Info = I.first->second;
318318 assert(I.second && "should only be called once per function");
319319 Info = analyzeResourceUsage(MF);
342342 if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
343343 if (!MFI->isEntryFunction()) {
344344 OutStreamer->emitRawComment(" Function info:", false);
345 SIFunctionResourceInfo &Info = CallGraphResourceInfo[MF.getFunction()];
345 SIFunctionResourceInfo &Info = CallGraphResourceInfo[&MF.getFunction()];
346346 emitCommonFunctionComments(
347347 Info.NumVGPR,
348348 Info.getTotalNumSGPRs(MF.getSubtarget()),
468468 unsigned RsrcReg;
469469 if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
470470 // Evergreen / Northern Islands
471 switch (MF.getFunction()->getCallingConv()) {
471 switch (MF.getFunction().getCallingConv()) {
472472 default: LLVM_FALLTHROUGH;
473473 case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
474474 case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
477477 }
478478 } else {
479479 // R600 / R700
480 switch (MF.getFunction()->getCallingConv()) {
480 switch (MF.getFunction().getCallingConv()) {
481481 default: LLVM_FALLTHROUGH;
482482 case CallingConv::AMDGPU_GS: LLVM_FALLTHROUGH;
483483 case CallingConv::AMDGPU_CS: LLVM_FALLTHROUGH;
492492 OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
493493 OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);
494494
495 if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
495 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
496496 OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
497497 OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4);
498498 }
786786 ProgInfo.DynamicCallStack = Info.HasDynamicallySizedStack || Info.HasRecursion;
787787
788788 if (!isUInt<32>(ProgInfo.ScratchSize)) {
789 DiagnosticInfoStackSize DiagStackSize(*MF.getFunction(),
789 DiagnosticInfoStackSize DiagStackSize(MF.getFunction(),
790790 ProgInfo.ScratchSize, DS_Error);
791 MF.getFunction()->getContext().diagnose(DiagStackSize);
791 MF.getFunction().getContext().diagnose(DiagStackSize);
792792 }
793793
794794 const SISubtarget &STM = MF.getSubtarget();
807807 unsigned MaxAddressableNumSGPRs = STM.getAddressableNumSGPRs();
808808 if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
809809 // This can happen due to a compiler bug or when using inline asm.
810 LLVMContext &Ctx = MF.getFunction()->getContext();
811 DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
810 LLVMContext &Ctx = MF.getFunction().getContext();
811 DiagnosticInfoResourceLimit Diag(MF.getFunction(),
812812 "addressable scalar registers",
813813 ProgInfo.NumSGPR, DS_Error,
814814 DK_ResourceLimit,
835835 if (ProgInfo.NumSGPR > MaxAddressableNumSGPRs) {
836836 // This can happen due to a compiler bug or when using inline asm to use
837837 // the registers which are usually reserved for vcc etc.
838 LLVMContext &Ctx = MF.getFunction()->getContext();
839 DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
838 LLVMContext &Ctx = MF.getFunction().getContext();
839 DiagnosticInfoResourceLimit Diag(MF.getFunction(),
840840 "scalar registers",
841841 ProgInfo.NumSGPR, DS_Error,
842842 DK_ResourceLimit,
855855 }
856856
857857 if (MFI->getNumUserSGPRs() > STM.getMaxNumUserSGPRs()) {
858 LLVMContext &Ctx = MF.getFunction()->getContext();
859 DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
858 LLVMContext &Ctx = MF.getFunction().getContext();
859 DiagnosticInfoResourceLimit Diag(MF.getFunction(), "user SGPRs",
860860 MFI->getNumUserSGPRs(), DS_Error);
861861 Ctx.diagnose(Diag);
862862 }
863863
864864 if (MFI->getLDSSize() > static_cast(STM.getLocalMemorySize())) {
865 LLVMContext &Ctx = MF.getFunction()->getContext();
866 DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory",
865 LLVMContext &Ctx = MF.getFunction().getContext();
866 DiagnosticInfoResourceLimit Diag(MF.getFunction(), "local memory",
867867 MFI->getLDSSize(), DS_Error);
868868 Ctx.diagnose(Diag);
869869 }
976976 const SIProgramInfo &CurrentProgramInfo) {
977977 const SISubtarget &STM = MF.getSubtarget();
978978 const SIMachineFunctionInfo *MFI = MF.getInfo();
979 unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());
980
981 if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
979 unsigned RsrcReg = getRsrcReg(MF.getFunction().getCallingConv());
980
981 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
982982 OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);
983983
984984 OutStreamer->EmitIntValue(CurrentProgramInfo.ComputePGMRSrc1, 4);
996996 OutStreamer->EmitIntValue(S_00B028_VGPRS(CurrentProgramInfo.VGPRBlocks) |
997997 S_00B028_SGPRS(CurrentProgramInfo.SGPRBlocks), 4);
998998 unsigned Rsrc2Val = 0;
999 if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
999 if (STM.isVGPRSpillingEnabled(MF.getFunction())) {
10001000 OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
10011001 OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(CurrentProgramInfo.ScratchBlocks), 4);
10021002 if (TM.getTargetTriple().getOS() == Triple::AMDPAL)
10031003 Rsrc2Val = S_00B84C_SCRATCH_EN(CurrentProgramInfo.ScratchBlocks > 0);
10041004 }
1005 if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
1005 if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
10061006 OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
10071007 OutStreamer->EmitIntValue(MFI->getPSInputEnable(), 4);
10081008 OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
10351035 // we can use the same fixed value that .AMDGPU.config has for Mesa. Note
10361036 // that we use a register number rather than a byte offset, so we need to
10371037 // divide by 4.
1038 unsigned Rsrc1Reg = getRsrcReg(MF.getFunction()->getCallingConv()) / 4;
1038 unsigned Rsrc1Reg = getRsrcReg(MF.getFunction().getCallingConv()) / 4;
10391039 unsigned Rsrc2Reg = Rsrc1Reg + 1;
10401040 // Also calculate the PAL metadata key for *S_SCRATCH_SIZE. It can be used
10411041 // with a constant offset to access any non-register shader-specific PAL
10421042 // metadata key.
10431043 unsigned ScratchSizeKey = PALMD::Key::CS_SCRATCH_SIZE;
1044 switch (MF.getFunction()->getCallingConv()) {
1044 switch (MF.getFunction().getCallingConv()) {
10451045 case CallingConv::AMDGPU_PS:
10461046 ScratchSizeKey = PALMD::Key::PS_SCRATCH_SIZE;
10471047 break;
10671067 PALMD::Key::VS_NUM_USED_SGPRS - PALMD::Key::VS_SCRATCH_SIZE;
10681068 PALMetadataMap[NumUsedVgprsKey] = CurrentProgramInfo.NumVGPRsForWavesPerEU;
10691069 PALMetadataMap[NumUsedSgprsKey] = CurrentProgramInfo.NumSGPRsForWavesPerEU;
1070 if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
1070 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
10711071 PALMetadataMap[Rsrc1Reg] |= CurrentProgramInfo.ComputePGMRSrc1;
10721072 PALMetadataMap[Rsrc2Reg] |= CurrentProgramInfo.ComputePGMRSrc2;
10731073 // ScratchSize is in bytes, 16 aligned.
10821082 PALMetadataMap[ScratchSizeKey] |=
10831083 alignTo(CurrentProgramInfo.ScratchSize, 16);
10841084 }
1085 if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
1085 if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS) {
10861086 PALMetadataMap[Rsrc2Reg] |=
10871087 S_00B02C_EXTRA_LDS_SIZE(CurrentProgramInfo.LDSBlocks);
10881088 PALMetadataMap[R_0286CC_SPI_PS_INPUT_ENA / 4] |= MFI->getPSInputEnable();
4242 MachineFunction &MF = MIRBuilder.getMF();
4343 const SIMachineFunctionInfo *MFI = MF.getInfo();
4444 MachineRegisterInfo &MRI = MF.getRegInfo();
45 const Function &F = *MF.getFunction();
45 const Function &F = MF.getFunction();
4646 const DataLayout &DL = F.getParent()->getDataLayout();
4747 PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
4848 LLT PtrType = getLLTForType(*PtrTy, DL);
6363 Type *ParamTy, unsigned Offset,
6464 unsigned DstReg) const {
6565 MachineFunction &MF = MIRBuilder.getMF();
66 const Function &F = *MF.getFunction();
66 const Function &F = MF.getFunction();
6767 const DataLayout &DL = F.getParent()->getDataLayout();
6868 PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUASI.CONSTANT_ADDRESS);
6969 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
10681068 SDValue Callee = CLI.Callee;
10691069 SelectionDAG &DAG = CLI.DAG;
10701070
1071 const Function &Fn = *DAG.getMachineFunction().getFunction();
1071 const Function &Fn = DAG.getMachineFunction().getFunction();
10721072
10731073 StringRef FuncName("");
10741074
10961096
10971097 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
10981098 SelectionDAG &DAG) const {
1099 const Function &Fn = *DAG.getMachineFunction().getFunction();
1099 const Function &Fn = DAG.getMachineFunction().getFunction();
11001100
11011101 DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
11021102 SDLoc(Op).getDebugLoc());
11891189 }
11901190 }
11911191
1192 const Function &Fn = *DAG.getMachineFunction().getFunction();
1192 const Function &Fn = DAG.getMachineFunction().getFunction();
11931193 DiagnosticInfoUnsupported BadInit(
11941194 Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
11951195 DAG.getContext()->diagnose(BadInit);
152152
153153 int MCOpcode = TII->pseudoToMCOpcode(Opcode);
154154 if (MCOpcode == -1) {
155 LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
155 LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
156156 C.emitError("AMDGPUMCInstLower::lower - Pseudo instruction doesn't have "
157157 "a target-specific version: " + Twine(MI->getOpcode()));
158158 }
204204
205205 StringRef Err;
206206 if (!STI.getInstrInfo()->verifyInstruction(*MI, Err)) {
207 LLVMContext &C = MI->getParent()->getParent()->getFunction()->getContext();
207 LLVMContext &C = MI->getParent()->getParent()->getFunction().getContext();
208208 C.emitError("Illegal instruction detected: " + Err);
209209 MI->print(errs());
210210 }
1818 MaxKernArgAlign(0),
1919 LDSSize(0),
2020 ABIArgOffset(0),
21 IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction()->getCallingConv())),
21 IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
2222 NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath) {
2323 // FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
2424 // except reserved size is not correctly aligned.
4242 // Forced to be here by one .inc
4343 const MCPhysReg *SIRegisterInfo::getCalleeSavedRegs(
4444 const MachineFunction *MF) const {
45 CallingConv::ID CC = MF->getFunction()->getCallingConv();
45 CallingConv::ID CC = MF->getFunction().getCallingConv();
4646 switch (CC) {
4747 case CallingConv::C:
4848 case CallingConv::Fast:
467467 }
468468
469469 unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
470 const Function &F = *MF.getFunction();
470 const Function &F = MF.getFunction();
471471 const SIMachineFunctionInfo &MFI = *MF.getInfo();
472472
473473 // Compute maximum number of SGPRs function can use using default/requested
517517 }
518518
519519 unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
520 const Function &F = *MF.getFunction();
520 const Function &F = MF.getFunction();
521521 const SIMachineFunctionInfo &MFI = *MF.getInfo();
522522
523523 // Compute maximum number of VGPRs function can use using default/requested
381381
382382 unsigned getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
383383 const auto *MFI = MF.getInfo();
384 return getOccupancyWithLocalMemSize(MFI->getLDSSize(), *MF.getFunction());
384 return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
385385 }
386386
387387 bool hasFP16Denormals() const {
409409 }
410410
411411 bool enableIEEEBit(const MachineFunction &MF) const {
412 return AMDGPU::isCompute(MF.getFunction()->getCallingConv());
412 return AMDGPU::isCompute(MF.getFunction().getCallingConv());
413413 }
414414
415415 bool useFlatForGlobal() const {
481481 }
482482
483483 bool isMesaKernel(const MachineFunction &MF) const {
484 return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction()->getCallingConv());
484 return isMesa3DOS() && !AMDGPU::isShader(MF.getFunction().getCallingConv());
485485 }
486486
487487 // Covers VS/PS/CS graphics shaders
488488 bool isMesaGfxShader(const MachineFunction &MF) const {
489 return isMesa3DOS() && AMDGPU::isShader(MF.getFunction()->getCallingConv());
489 return isMesa3DOS() && AMDGPU::isShader(MF.getFunction().getCallingConv());
490490 }
491491
492492 bool isAmdCodeObjectV2(const MachineFunction &MF) const {
16401640 FuncRep->push_back(DummyExitBlk); //insert to function
16411641 SHOWNEWBLK(DummyExitBlk, "DummyExitBlock to normalize infiniteLoop: ");
16421642 DEBUG(dbgs() << "Old branch instr: " << *BranchMI << "\n";);
1643 LLVMContext &Ctx = LoopHeader->getParent()->getFunction()->getContext();
1643 LLVMContext &Ctx = LoopHeader->getParent()->getFunction().getContext();
16441644 Ctx.emitError("Extra register needed to handle CFG");
16451645 return nullptr;
16461646 }
565565 bool TryMaximizeOccupancy) {
566566 const auto &ST = MF.getSubtarget();
567567 auto TgtOcc = std::min(ST.getOccupancyWithLocalMemSize(MF),
568 ST.getWavesPerEU(*MF.getFunction()).second);
568 ST.getWavesPerEU(MF.getFunction()).second);
569569
570570 sortRegionsByPressure(TgtOcc);
571571 auto Occ = Regions.front()->MaxPressure.getOccupancy(ST);
3636 ST.getOccupancyWithNumVGPRs(VGPRs));
3737 return std::min(MinRegOccupancy,
3838 ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
39 *MF.getFunction()));
39 MF.getFunction()));
4040 }
4141
4242 void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
314314 ST(MF.getSubtarget()),
315315 MFI(*MF.getInfo()),
316316 StartingOccupancy(ST.getOccupancyWithLocalMemSize(MFI.getLDSSize(),
317 *MF.getFunction())),
317 MF.getFunction())),
318318 MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {
319319
320320 DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
179179 }
180180
181181 bool R600ClauseMergePass::runOnMachineFunction(MachineFunction &MF) {
182 if (skipFunction(*MF.getFunction()))
182 if (skipFunction(MF.getFunction()))
183183 return false;
184184
185185 const R600Subtarget &ST = MF.getSubtarget();
511511
512512 R600MachineFunctionInfo *MFI = MF.getInfo();
513513
514 CFStack CFStack(ST, MF.getFunction()->getCallingConv());
514 CFStack CFStack(ST, MF.getFunction().getCallingConv());
515515 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
516516 ++MB) {
517517 MachineBasicBlock &MBB = *MB;
518518 unsigned CfCount = 0;
519519 std::vector>> LoopStack;
520520 std::vector IfThenElseStack;
521 if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_VS) {
521 if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_VS) {
522522 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
523523 getHWInstrDesc(CF_CALL_FS));
524524 CfCount++;
196196
197197 bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
198198 const MachineFunction *MF = MI.getParent()->getParent();
199 return !AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
199 return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
200200 usesVertexCache(MI.getOpcode());
201201 }
202202
206206
207207 bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
208208 const MachineFunction *MF = MI.getParent()->getParent();
209 return (AMDGPU::isCompute(MF->getFunction()->getCallingConv()) &&
209 return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
210210 usesVertexCache(MI.getOpcode())) ||
211211 usesTextureCache(MI.getOpcode());
212212 }
335335 }
336336
337337 bool R600VectorRegMerger::runOnMachineFunction(MachineFunction &Fn) {
338 if (skipFunction(*Fn.getFunction()))
338 if (skipFunction(Fn.getFunction()))
339339 return false;
340340
341341 const R600Subtarget &ST = Fn.getSubtarget();
925925 }
926926
927927 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
928 if (skipFunction(*MF.getFunction()))
928 if (skipFunction(MF.getFunction()))
929929 return false;
930930
931931 MRI = &MF.getRegInfo();
393393 // We now have the GIT ptr - now get the scratch descriptor from the entry
394394 // at offset 0.
395395 PointerType *PtrTy =
396 PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
396 PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
397397 AMDGPUAS::CONSTANT_ADDRESS);
398398 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
399399 const MCInstrDesc &LoadDwordX4 = TII->get(AMDGPU::S_LOAD_DWORDX4_IMM);
424424 if (MFI->hasImplicitBufferPtr()) {
425425 unsigned Rsrc01 = TRI->getSubReg(ScratchRsrcReg, AMDGPU::sub0_sub1);
426426
427 if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
427 if (AMDGPU::isCompute(MF.getFunction().getCallingConv())) {
428428 const MCInstrDesc &Mov64 = TII->get(AMDGPU::S_MOV_B64);
429429
430430 BuildMI(MBB, I, DL, Mov64, Rsrc01)
434434 const MCInstrDesc &LoadDwordX2 = TII->get(AMDGPU::S_LOAD_DWORDX2_IMM);
435435
436436 PointerType *PtrTy =
437 PointerType::get(Type::getInt64Ty(MF.getFunction()->getContext()),
437 PointerType::get(Type::getInt64Ty(MF.getFunction().getContext()),
438438 AMDGPUAS::CONSTANT_ADDRESS);
439439 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
440440 auto MMO = MF.getMachineMemOperand(PtrInfo,
14591459 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
14601460
14611461 MachineFunction &MF = DAG.getMachineFunction();
1462 FunctionType *FType = MF.getFunction()->getFunctionType();
1462 FunctionType *FType = MF.getFunction().getFunctionType();
14631463 SIMachineFunctionInfo *Info = MF.getInfo();
14641464 const SISubtarget &ST = MF.getSubtarget();
14651465
14661466 if (Subtarget->isAmdHsaOS() && AMDGPU::isShader(CallConv)) {
1467 const Function *Fn = MF.getFunction();
1467 const Function &Fn = MF.getFunction();
14681468 DiagnosticInfoUnsupported NoGraphicsHSA(
1469 *Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
1469 Fn, "unsupported non-compute shaders with HSA", DL.getDebugLoc());
14701470 DAG.getContext()->diagnose(NoGraphicsHSA);
14711471 return DAG.getEntryNode();
14721472 }
16951695
16961696 auto &ArgUsageInfo =
16971697 DAG.getPass()->getAnalysis();
1698 ArgUsageInfo.setFuncArgInfo(*MF.getFunction(), Info->getArgInfo());
1698 ArgUsageInfo.setFuncArgInfo(MF.getFunction(), Info->getArgInfo());
16991699
17001700 unsigned StackArgSize = CCInfo.getNextStackOffset();
17011701 Info->setBytesInStackArgArea(StackArgSize);
20312031 return false;
20322032
20332033 MachineFunction &MF = DAG.getMachineFunction();
2034 const Function *CallerF = MF.getFunction();
2035 CallingConv::ID CallerCC = CallerF->getCallingConv();
2034 const Function &CallerF = MF.getFunction();
2035 CallingConv::ID CallerCC = CallerF.getCallingConv();
20362036 const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
20372037 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
20382038
20532053 if (IsVarArg)
20542054 return false;
20552055
2056 for (const Argument &Arg : CallerF->args()) {
2056 for (const Argument &Arg : CallerF.args()) {
20572057 if (Arg.hasByValAttr())
20582058 return false;
20592059 }
35933593 case SISubtarget::TrapIDLLVMTrap:
35943594 return DAG.getNode(AMDGPUISD::ENDPGM, SL, MVT::Other, Chain);
35953595 case SISubtarget::TrapIDLLVMDebugTrap: {
3596 DiagnosticInfoUnsupported NoTrap(*MF.getFunction(),
3596 DiagnosticInfoUnsupported NoTrap(MF.getFunction(),
35973597 "debugtrap handler not supported",
35983598 Op.getDebugLoc(),
35993599 DS_Warning);
3600 LLVMContext &Ctx = MF.getFunction()->getContext();
3600 LLVMContext &Ctx = MF.getFunction().getContext();
36013601 Ctx.diagnose(NoTrap);
36023602 return Chain;
36033603 }
37103710
37113711 const MachineFunction &MF = DAG.getMachineFunction();
37123712 DiagnosticInfoUnsupported InvalidAddrSpaceCast(
3713 *MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
3713 MF.getFunction(), "invalid addrspacecast", SL.getDebugLoc());
37143714 DAG.getContext()->diagnose(InvalidAddrSpaceCast);
37153715
37163716 return DAG.getUNDEF(ASC->getValueType(0));
39123912
39133913 static SDValue emitNonHSAIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
39143914 EVT VT) {
3915 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
3915 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
39163916 "non-hsa intrinsic with hsa target",
39173917 DL.getDebugLoc());
39183918 DAG.getContext()->diagnose(BadIntrin);
39213921
39223922 static SDValue emitRemovedIntrinsicError(SelectionDAG &DAG, const SDLoc &DL,
39233923 EVT VT) {
3924 DiagnosticInfoUnsupported BadIntrin(*DAG.getMachineFunction().getFunction(),
3924 DiagnosticInfoUnsupported BadIntrin(DAG.getMachineFunction().getFunction(),
39253925 "intrinsic not supported on subtarget",
39263926 DL.getDebugLoc());
39273927 DAG.getContext()->diagnose(BadIntrin);
39503950 case Intrinsic::amdgcn_queue_ptr: {
39513951 if (!Subtarget->isAmdCodeObjectV2(MF)) {
39523952 DiagnosticInfoUnsupported BadIntrin(
3953 *MF.getFunction(), "unsupported hsa intrinsic without hsa target",
3953 MF.getFunction(), "unsupported hsa intrinsic without hsa target",
39543954 DL.getDebugLoc());
39553955 DAG.getContext()->diagnose(BadIntrin);
39563956 return DAG.getUNDEF(VT);
41284128 return SDValue();
41294129
41304130 DiagnosticInfoUnsupported BadIntrin(
4131 *MF.getFunction(), "intrinsic not supported on subtarget",
4131 MF.getFunction(), "intrinsic not supported on subtarget",
41324132 DL.getDebugLoc());
41334133 DAG.getContext()->diagnose(BadIntrin);
41344134 return DAG.getUNDEF(VT);
45584558 case Intrinsic::amdgcn_s_barrier: {
45594559 if (getTargetMachine().getOptLevel() > CodeGenOpt::None) {
45604560 const SISubtarget &ST = MF.getSubtarget();
4561 unsigned WGSize = ST.getFlatWorkGroupSizes(*MF.getFunction()).second;
4561 unsigned WGSize = ST.getFlatWorkGroupSizes(MF.getFunction()).second;
45624562 if (WGSize <= ST.getWavefrontSize())
45634563 return SDValue(DAG.getMachineNode(AMDGPU::WAVE_BARRIER, DL, MVT::Other,
45644564 Op.getOperand(0)), 0);
165165 MachineBasicBlock &MBB = *MI.getParent();
166166 MachineFunction *MF = MBB.getParent();
167167
168 if (MF->getFunction()->getCallingConv() != CallingConv::AMDGPU_PS ||
168 if (MF->getFunction().getCallingConv() != CallingConv::AMDGPU_PS ||
169169 !shouldSkip(MBB, MBB.getParent()->back()))
170170 return false;
171171
374374 if (!Base1 || !Base2)
375375 return false;
376376 const MachineFunction &MF = *MI1.getParent()->getParent();
377 const DataLayout &DL = MF.getFunction()->getParent()->getDataLayout();
377 const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
378378 Base1 = GetUnderlyingObject(Base1, DL);
379379 Base2 = GetUnderlyingObject(Base1, DL);
380380
441441 const DebugLoc &DL, unsigned DestReg,
442442 unsigned SrcReg, bool KillSrc) {
443443 MachineFunction *MF = MBB.getParent();
444 DiagnosticInfoUnsupported IllegalCopy(*MF->getFunction(),
444 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
445445 "illegal SGPR to VGPR copy",
446446 DL, DS_Error);
447 LLVMContext &C = MF->getFunction()->getContext();
447 LLVMContext &C = MF->getFunction().getContext();
448448 C.diagnose(IllegalCopy);
449449
450450 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
872872 return;
873873 }
874874
875 if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
876 LLVMContext &Ctx = MF->getFunction()->getContext();
875 if (!ST.isVGPRSpillingEnabled(MF->getFunction())) {
876 LLVMContext &Ctx = MF->getFunction().getContext();
877877 Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
878878 " spill register");
879879 BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
974974 return;
975975 }
976976
977 if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
978 LLVMContext &Ctx = MF->getFunction()->getContext();
977 if (!ST.isVGPRSpillingEnabled(MF->getFunction())) {
978 LLVMContext &Ctx = MF->getFunction().getContext();
979979 Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
980980 " restore register");
981981 BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
10161016 if (TIDReg == AMDGPU::NoRegister)
10171017 return TIDReg;
10181018
1019 if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
1019 if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
10201020 WorkGroupSize > WavefrontSize) {
10211021 unsigned TIDIGXReg
10221022 = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
34433443 // scratch memory access. In both cases, the legalization never involves
34443444 // conversion to the addr64 form.
34453445 if (isMIMG(MI) ||
3446 (AMDGPU::isShader(MF.getFunction()->getCallingConv()) &&
3446 (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
34473447 (isMUBUF(MI) || isMTBUF(MI)))) {
34483448 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
34493449 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
912912 }
913913
914914 bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
915 if (skipFunction(*MF.getFunction()))
915 if (skipFunction(MF.getFunction()))
916916 return false;
917917
918918 STM = &MF.getSubtarget();
5050 ImplicitArgPtr(false),
5151 GITPtrHigh(0xffffffff) {
5252 const SISubtarget &ST = MF.getSubtarget();
53 const Function *F = MF.getFunction();
54 FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(*F);
55 WavesPerEU = ST.getWavesPerEU(*F);
53 const Function &F = MF.getFunction();
54 FlatWorkGroupSizes = ST.getFlatWorkGroupSizes(F);
55 WavesPerEU = ST.getWavesPerEU(F);
5656
5757 if (!isEntryFunction()) {
5858 // Non-entry functions have no special inputs for now, other registers
6767 ArgInfo.PrivateSegmentWaveByteOffset =
68