llvm.org GIT mirror llvm / 7fccc80
[MCA] Improved handling of in-order issue/dispatch resources. Added field 'MustIssueImmediately' to the instruction descriptor of instructions that only consume in-order issue/dispatch processor resources. This speeds up queries from the hardware Scheduler, and gives an average ~5% speedup on a release build. No functional change intended. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350397 91177308-0d34-0410-b5e6-96231b3b80d8 Andrea Di Biagio 1 year, 10 months ago
5 changed file(s) with 19 addition(s) and 25 deletion(s). Raw diff Collapse all Expand all
388388 // Release a previously reserved processor resource.
389389 void releaseResource(uint64_t ResourceID);
390390
391 // Returns true if all resources are in-order, and there is at least one
392 // resource which is a dispatch hazard (BufferSize = 0).
393 bool mustIssueImmediately(const InstrDesc &Desc) const;
394
395391 bool canBeIssued(const InstrDesc &Desc) const;
396392
397393 void issueInstruction(
336336 bool BeginGroup;
337337 bool EndGroup;
338338
339 // True if all buffered resources are in-order, and there is at least one
340 // buffer which is a dispatch hazard (BufferSize = 0).
341 bool MustIssueImmediately;
342
339343 // A zero latency instruction doesn't consume any scheduler resources.
340344 bool isZeroLatency() const { return !MaxLatency && Resources.empty(); }
341345
264264 unsigned Index = getResourceStateIndex(E.first);
265265 return Resources[Index]->isReady(NumUnits);
266266 });
267 }
268
269 // Returns true if all resources are in-order, and there is at least one
270 // resource which is a dispatch hazard (BufferSize = 0).
// NOTE(review): this definition is deleted by this commit — the result is now
// precomputed once per instruction into InstrDesc::MustIssueImmediately (see
// the InstrBuilder hunk below), so the Scheduler no longer pays for this
// per-query scan over the consumed resources.
271 bool ResourceManager::mustIssueImmediately(const InstrDesc &Desc) const {
// An instruction that cannot be issued this cycle at all can trivially not be
// forced to issue immediately.
272 if (!canBeIssued(Desc))
273 return false;
// First condition: every buffered resource consumed by the instruction must be
// in-order (BufferSize <= 1) or a dispatch hazard (BufferSize == 0).
274 bool AllInOrderResources = all_of(Desc.Buffers, [&](uint64_t BufferMask) {
275 unsigned Index = getResourceStateIndex(BufferMask);
276 const ResourceState &Resource = *Resources[Index];
277 return Resource.isInOrder() || Resource.isADispatchHazard();
278 });
279 if (!AllInOrderResources)
280 return false;
281
// Second condition: at least one of the consumed buffers must be a dispatch
// hazard (BufferSize == 0); only then is immediate issue mandatory.
282 return any_of(Desc.Buffers, [&](uint64_t BufferMask) {
283 return Resources[getResourceStateIndex(BufferMask)]->isADispatchHazard();
284 });
285267 }
286268
287269 void ResourceManager::issueInstruction(
198198 }
199199
200200 bool Scheduler::mustIssueImmediately(const InstRef &IR) const {
201 const InstrDesc &Desc = IR.getInstruction()->getDesc();
202 if (Desc.isZeroLatency())
203 return true;
201204 // Instructions that use an in-order dispatch/issue processor resource must be
202205 // issued immediately to the pipeline(s). Any other in-order buffered
203206 // resources (i.e. BufferSize=1) are consumed.
204 const InstrDesc &Desc = IR.getInstruction()->getDesc();
205 return Desc.isZeroLatency() || Resources->mustIssueImmediately(Desc);
207 return Desc.MustIssueImmediately;
206208 }
207209
208210 void Scheduler::dispatch(const InstRef &IR) {
5858 unsigned NumProcResources = SM.getNumProcResourceKinds();
5959 APInt Buffers(NumProcResources, 0);
6060
61 bool AllInOrderResources = true;
62 bool AnyDispatchHazards = false;
6163 for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
6264 const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
6365 const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
6466 uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
65 if (PR.BufferSize != -1)
67 if (PR.BufferSize < 0) {
68 AllInOrderResources = false;
69 } else {
6670 Buffers.setBit(PRE->ProcResourceIdx);
71 AnyDispatchHazards |= (PR.BufferSize == 0);
72 AllInOrderResources &= (PR.BufferSize <= 1);
73 }
74
6775 CycleSegment RCy(0, PRE->Cycles, false);
6876 Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
6977 if (PR.SuperIdx) {
7179 SuperResources[Super] += PRE->Cycles;
7280 }
7381 }
82
83 ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;
7484
7585 // Sort elements by mask popcount, so that we prioritize resource units over
7686 // resource groups, and smaller groups over larger groups.