llvm.org GIT mirror llvm / 1733b9a
StackProtector: Use PointerMayBeCaptured

This was using its own, outdated list of possible captures. It was at minimum not catching cmpxchg and addrspacecast captures. One change is that any volatile access is now treated as capturing.

The test coverage for this pass is quite inadequate, but the change required removing volatile from the lifetime capture test.

Also fixes some infrastructure issues to allow running just the IR pass.

Fixes bug 42238.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363169 91177308-0d34-0410-b5e6-96231b3b80d8

Matt Arsenault
6 changed files with 149 additions and 43 deletions.
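For context, a minimal, standalone sketch (not part of this commit) of the query the pass now relies on: PointerMayBeCaptured from llvm/Analysis/CaptureTracking.h reports the cmpxchg capture that the old hand-rolled HasAddressTaken list missed. The driver program, function name @f, and the inline IR string are illustrative only; the ReturnCaptures/StoreCaptures arguments mirror the call added in the hunk below.

// Illustrative driver, not part of the patch: parse a tiny module and ask
// whether each alloca may be captured, the same query StackProtector now uses.
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // The cmpxchg passes %a as a value operand, which captures it; the old
  // list in HasAddressTaken did not recognize this case.
  std::unique_ptr<Module> M = parseAssemblyString(
      "define void @f() {\n"
      "entry:\n"
      "  %a = alloca i32, align 4\n"
      "  %j = alloca i32*, align 8\n"
      "  cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic\n"
      "  ret void\n"
      "}\n",
      Err, Ctx);
  if (!M)
    return 1;

  for (Instruction &I : M->getFunction("f")->getEntryBlock())
    if (auto *AI = dyn_cast<AllocaInst>(&I)) {
      // Same arguments as the call added to StackProtector in this commit.
      bool Captured = PointerMayBeCaptured(AI, /* ReturnCaptures */ false,
                                           /* StoreCaptures */ true);
      outs() << AI->getName() << " may be captured: "
             << (Captured ? "true" : "false") << "\n";
    }
  return 0; // Expected: %a may be captured, %j may not.
}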
   /// protection when -fstack-protection is used.
   unsigned SSPBufferSize = 0;
 
-  /// VisitedPHIs - The set of PHI nodes visited when determining
-  /// if a variable's reference has been taken. This set
-  /// is maintained to ensure we don't visit the same PHI node multiple
-  /// times.
-  SmallPtrSet<const PHINode *, 16> VisitedPHIs;
-
   // A prologue is generated.
   bool HasPrologue = false;
 
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CaptureTracking.h"
 #include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
 #include "llvm/CodeGen/Passes.h"
   }
 
   return NeedsProtector;
-}
-
-bool StackProtector::HasAddressTaken(const Instruction *AI) {
-  for (const User *U : AI->users()) {
-    if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
-      if (AI == SI->getValueOperand())
-        return true;
-    } else if (const PtrToIntInst *SI = dyn_cast<PtrToIntInst>(U)) {
-      if (AI == SI->getOperand(0))
-        return true;
-    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
-      // Ignore intrinsics that are not calls. TODO: Use isLoweredToCall().
-      if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
-        return true;
-    } else if (isa<InvokeInst>(U)) {
-      return true;
-    } else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
-      if (HasAddressTaken(SI))
-        return true;
-    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
-      // Keep track of what PHI nodes we have already visited to ensure
-      // they are only visited once.
-      if (VisitedPHIs.insert(PN).second)
-        if (HasAddressTaken(PN))
-          return true;
-    } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
-      if (HasAddressTaken(GEP))
-        return true;
-    } else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
-      if (HasAddressTaken(BI))
-        return true;
-    }
-  }
-  return false;
 }
 
 /// Search for the first call to the llvm.stackprotector intrinsic and return it
 
           continue;
         }
 
-        if (Strong && HasAddressTaken(AI)) {
+        if (Strong && PointerMayBeCaptured(AI,
+                                           /* ReturnCaptures */ false,
+                                           /* StoreCaptures */ true)) {
           ++NumAddrTaken;
           Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
           ORE.emit([&]() {
   %1 = alloca i32, align 4
   %2 = bitcast i32* %1 to i8*
   call void @llvm.lifetime.start.p0i8(i64 4, i8* nonnull %2)
-  store volatile i32 1, i32* %1, align 4
-  %3 = load volatile i32, i32* %1, align 4
+  store i32 1, i32* %1, align 4
+  %3 = load i32, i32* %1, align 4
   %4 = mul nsw i32 %3, 42
   call void @llvm.lifetime.end.p0i8(i64 4, i8* nonnull %2)
   ret i32 %4
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -stack-protector < %s | FileCheck %s
; Bug 42238: Test some situations missed by old, custom capture tracking.

define void @store_captures() #0 {
; CHECK-LABEL: @store_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    store i32* [[A]], i32** [[J]], align 8
; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK:       SP_return:
; CHECK-NEXT:    ret void
; CHECK:       CallStackCheckFailBlk:
; CHECK-NEXT:    call void @__stack_chk_fail()
; CHECK-NEXT:    unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  store i32* %a, i32** %j, align 8
  ret void
}

define i32* @return_captures() #0 {
; CHECK-LABEL: @return_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    ret i32* [[A]]
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  ret i32* %a
}

define void @store_addrspacecast_captures() #0 {
; CHECK-LABEL: @store_addrspacecast_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32 addrspace(1)*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
; CHECK-NEXT:    store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK:       SP_return:
; CHECK-NEXT:    ret void
; CHECK:       CallStackCheckFailBlk:
; CHECK-NEXT:    call void @__stack_chk_fail()
; CHECK-NEXT:    unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32 addrspace(1)*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
  store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
  ret void
}

define void @cmpxchg_captures() #0 {
; CHECK-LABEL: @cmpxchg_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* [[A]], i32* null seq_cst monotonic
; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
; CHECK-NEXT:    br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK:       SP_return:
; CHECK-NEXT:    ret void
; CHECK:       CallStackCheckFailBlk:
; CHECK-NEXT:    call void @__stack_chk_fail()
; CHECK-NEXT:    unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4

  cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic
  ret void
}

attributes #0 = { sspstrong }
if not 'X86' in config.root.targets:
    config.unsupported = True

   initializeDwarfEHPreparePass(Registry);
   initializeSafeStackLegacyPassPass(Registry);
   initializeSjLjEHPreparePass(Registry);
+  initializeStackProtectorPass(Registry);
   initializePreISelIntrinsicLoweringLegacyPassPass(Registry);
   initializeGlobalMergePass(Registry);
   initializeIndirectBrExpandPassPass(Registry);