llvm / commit 714e2a9
[MachinePipeliner] Don't check boundary node in checkValidNodeOrder

This was exposed by PowerPC target enablement.

In ScheduleDAG, if we haven't seen any uses in this scheduling region, we create a dependence edge to ExitSU to model the live-out latency. This is required for vreg defs with no in-region use, and for prefetches with no vreg def.

When we build NodeOrder in the scheduler, we ignore these boundary nodes. However, when we walk Succs in checkValidNodeOrder, we did not skip them, so we still assumed that every node had been sorted into the Indices array. Calling std::lower_bound() for ExitSU therefore returns Indices.end(), and dereferencing that iterator in the subsequent node access corrupts memory.

Differential Revision: https://reviews.llvm.org/D63282

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363329 91177308-0d34-0410-b5e6-96231b3b80d8

Jinsong Ji, 3 months ago
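The failure mode is easy to reproduce outside LLVM. Below is a minimal, self-contained C++ sketch (the names Indices and CompareKey merely mirror the commit message; this is not LLVM code): std::lower_bound over a sorted vector returns the end iterator when the key is absent and sorts past every entry, and dereferencing that iterator, which is what the unpatched check effectively did for ExitSU, is undefined behavior.

```cpp
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>

int main() {
  // Indices maps each scheduled node (an unsigned id here) to its position
  // in the node order. Id 1000 stands in for the boundary node ExitSU and
  // is deliberately absent, just as boundary nodes are absent from NodeOrder.
  std::vector<std::pair<unsigned, unsigned>> Indices = {{7, 0}, {42, 1}};
  auto CompareKey = [](const std::pair<unsigned, unsigned> &A,
                       const std::pair<unsigned, unsigned> &B) {
    return A.first < B.first;
  };
  std::sort(Indices.begin(), Indices.end(), CompareKey);

  // Unguarded lookup of a key that sorts past every entry: lower_bound
  // returns Indices.end(), and dereferencing that iterator is undefined
  // behavior -- the "memory issues" the commit message describes.
  auto It = std::lower_bound(Indices.begin(), Indices.end(),
                             std::make_pair(1000u, 0u), CompareKey);
  if (It == Indices.end())
    std::cout << "boundary node not in Indices; *It would be UB\n";
  else
    std::cout << "found at node-order position " << It->second << "\n";
}
```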
2 changed files with 83 additions and 0 deletions.
lib/CodeGen/MachinePipeliner.cpp:

```diff
   for (SDep &SuccEdge : SU->Succs) {
     SUnit *SuccSU = SuccEdge.getSUnit();
+    // Do not process a boundary node; it was not included in NodeOrder,
+    // hence not in Indices either, and the call to std::lower_bound()
+    // below would return Indices.end().
+    if (SuccSU->isBoundaryNode())
+      continue;
     unsigned SuccIndex =
         std::get<1>(*std::lower_bound(Indices.begin(), Indices.end(),
                                       std::make_pair(SuccSU, 0), CompareKey));
```
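Skipping boundary successors in the checker mirrors what the node-order construction already does: boundary nodes such as ExitSU are never emitted into NodeOrder, so the checker must apply the same filter rather than assume every successor has an entry in Indices. An alternative would have been to compare the lower_bound result against Indices.end() at every lookup, but filtering at the source preserves the invariant that every node actually looked up is present.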
The added PowerPC test:

```llvm
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -ppc-vsr-nums-as-vr -mtriple=powerpc64-unknown-linux-gnu \
; RUN:   -verify-machineinstrs -ppc-asm-full-reg-names -mcpu=pwr9 --ppc-enable-pipeliner \
; RUN:   | FileCheck %s

@x = dso_local local_unnamed_addr global <{ i32, i32, i32, i32, [1020 x i32] }> <{ i32 1, i32 2, i32 3, i32 4, [1020 x i32] zeroinitializer }>, align 4
@y = common dso_local global [1024 x i32] zeroinitializer, align 4

; Function Attrs: norecurse nounwind
define dso_local i32* @foo() local_unnamed_addr #0 {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addis r5, r2, x@toc@ha
; CHECK-NEXT:    addi r5, r5, x@toc@l
; CHECK-NEXT:    addis r6, r2, y@toc@ha
; CHECK-NEXT:    li r7, 340
; CHECK-NEXT:    addi r3, r6, y@toc@l
; CHECK-NEXT:    lwz r6, y@toc@l(r6)
; CHECK-NEXT:    mtctr r7
; CHECK-NEXT:    addi r5, r5, -8
; CHECK-NEXT:    lwzu r7, 12(r5)
; CHECK-NEXT:    maddld r6, r7, r7, r6
; CHECK-NEXT:    lwz r7, 4(r5)
; CHECK-NEXT:    addi r4, r3, -8
; CHECK-NEXT:    stwu r6, 12(r4)
; CHECK-NEXT:    maddld r6, r7, r7, r6
; CHECK-NEXT:    lwz r7, 8(r5)
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  .LBB0_1: # %for.body
; CHECK:         maddld r7, r7, r7, r6
; CHECK-NEXT:    lwzu r8, 12(r5)
; CHECK-NEXT:    maddld r8, r8, r8, r7
; CHECK-NEXT:    stw r6, 4(r4)
; CHECK-NEXT:    lwz r6, 4(r5)
; CHECK-NEXT:    maddld r6, r6, r6, r8
; CHECK-NEXT:    stw r7, 8(r4)
; CHECK-NEXT:    lwz r7, 8(r5)
; CHECK-NEXT:    stwu r8, 12(r4)
; CHECK-NEXT:    bdnz .LBB0_1
; CHECK-NEXT:  # %bb.2:
; CHECK-NEXT:    maddld r5, r7, r7, r6
; CHECK-NEXT:    stw r6, 4(r4)
; CHECK-NEXT:    stw r5, 8(r4)
; CHECK-NEXT:    blr
entry:
  %.pre = load i32, i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0), align 4
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  ret i32* getelementptr inbounds ([1024 x i32], [1024 x i32]* @y, i64 0, i64 0)

for.body:                                         ; preds = %for.body, %entry
  %0 = phi i32 [ %.pre, %entry ], [ %add.2, %for.body ]
  %indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next.2, %for.body ]
  %arrayidx2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv
  %1 = load i32, i32* %arrayidx2, align 4
  %mul = mul nsw i32 %1, %1
  %add = add nsw i32 %mul, %0
  %arrayidx6 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv
  store i32 %add, i32* %arrayidx6, align 4
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %arrayidx2.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next
  %2 = load i32, i32* %arrayidx2.1, align 4
  %mul.1 = mul nsw i32 %2, %2
  %add.1 = add nsw i32 %mul.1, %add
  %arrayidx6.1 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next
  store i32 %add.1, i32* %arrayidx6.1, align 4
  %indvars.iv.next.1 = add nuw nsw i64 %indvars.iv, 2
  %arrayidx2.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* bitcast (<{ i32, i32, i32, i32, [1020 x i32] }>* @x to [1024 x i32]*), i64 0, i64 %indvars.iv.next.1
  %3 = load i32, i32* %arrayidx2.2, align 4
  %mul.2 = mul nsw i32 %3, %3
  %add.2 = add nsw i32 %mul.2, %add.1
  %arrayidx6.2 = getelementptr inbounds [1024 x i32], [1024 x i32]* @y, i64 0, i64 %indvars.iv.next.1
  store i32 %add.2, i32* %arrayidx6.2, align 4
  %indvars.iv.next.2 = add nuw nsw i64 %indvars.iv, 3
  %exitcond.2 = icmp eq i64 %indvars.iv.next.2, 1024
  br i1 %exitcond.2, label %for.cond.cleanup, label %for.body
}
```
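For reference, here is a plausible source for the test above, reconstructed from the IR (an assumption; the original source is not part of the commit). The loop-carried use of y[i-1] is what gives the pipeliner a cross-iteration dependence to schedule, and the IR is this loop unrolled by a factor of three.

```cpp
// Reconstructed sketch (assumption), not part of the commit.
int x[1024] = {1, 2, 3, 4}; // matches @x's initializer
int y[1024];                // matches @y

int *foo(void) {
  // Each iteration squares x[i] and accumulates into the running sum
  // carried in y[i-1]; this is the cross-iteration dependence.
  for (int i = 1; i < 1024; ++i)
    y[i] = x[i] * x[i] + y[i - 1];
  return y;
}
```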