llvm.org GIT mirror — llvm commit 7ffec83
Protection against stack-based memory corruption errors using SafeStack

This patch adds the safe stack instrumentation pass to LLVM, which separates the program stack into a safe stack, which stores return addresses, register spills, and local variables that are statically verified to be accessed in a safe way, and the unsafe stack, which stores everything else. Such separation makes it much harder for an attacker to corrupt objects on the safe stack, including function pointers stored in spilled registers and return addresses. You can find more information about the safe stack, as well as other parts of our control-flow hijack protection technique, in our OSDI paper on code-pointer integrity (http://dslab.epfl.ch/pubs/cpi.pdf) and on our project website (http://levee.epfl.ch).

The overhead of our implementation of the safe stack is very close to zero (0.01% on the Phoronix benchmarks). This is lower than the overhead of stack cookies, which are supported by LLVM and are commonly used today, yet the security guarantees of the safe stack are strictly stronger than those of stack cookies. In some cases, the safe stack even improves performance due to better cache locality.

Our current implementation of the safe stack is stable and robust: we used it to recompile multiple projects on Linux, including Chromium, and we also recompiled the entire FreeBSD user-space system along with more than 100 packages. We ran unit tests on the FreeBSD system and many of the packages and observed no errors caused by the safe stack. The safe stack is also fully binary compatible with non-instrumented code and can be applied to parts of a program selectively.

This patch is our implementation of the safe stack on top of LLVM. It makes the following changes:

- Add the safestack function attribute, similar to the ssp, sspstrong and sspreq attributes.
- Add the SafeStack instrumentation pass that applies the safe stack to all functions that have the safestack attribute. This pass moves all unsafe local variables to the unsafe stack with a separate stack pointer, whereas all safe variables remain on the regular stack that is managed by LLVM as usual.
- Invoke the pass as the last stage before code generation (at the same time the existing cookie-based stack protector pass is invoked).
- Add unit tests for the safe stack.

Original patch by Volodymyr Kuznetsov and others at the Dependable Systems Lab at EPFL; updates and upstreaming by myself.

Differential Revision: http://reviews.llvm.org/D6094

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@239761 91177308-0d34-0410-b5e6-96231b3b80d8

Peter Collingbourne
48 changed files with 1,417 additions and 4 deletions.
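To see how the new attribute and pass fit together, here is a minimal hand-written sketch in the style of the regression tests added at the end of this patch (the names @f and @use are illustrative only and not part of the patch): a function marked safestack whose local buffer escapes into a call is rewritten by opt -safe-stack to use the unsafe stack.

; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s

define void @f() safestack {
entry:
  ; The address of %buf escapes into a call whose argument is not marked
  ; 'nocapture', so the pass moves the buffer to the unsafe stack.
  ; CHECK: __safestack_unsafe_stack_ptr
  %buf = alloca [16 x i8], align 16
  %p = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
  call void @use(i8* %p)
  ret void
}

declare void @use(i8*)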
13181318 ``setjmp`` is an example of such a function. The compiler disables
13191319 some optimizations (like tail calls) in the caller of these
13201320 functions.
1321 ``safestack``
1322 This attribute indicates that
1323 `SafeStack <http://clang.llvm.org/docs/SafeStack.html>`_
1324 protection is enabled for this function.
1325
1326 If a function that has a ``safestack`` attribute is inlined into a
1327 function that doesn't have a ``safestack`` attribute or which has an
1328 ``ssp``, ``sspstrong`` or ``sspreq`` attribute, then the resulting
1329 function will have a ``safestack`` attribute.
13211330 ``sanitize_address``
13221331 This attribute indicates that AddressSanitizer checks
13231332 (dynamic address safety analysis) are enabled for this function.
402402 ATTR_KIND_JUMP_TABLE = 40,
403403 ATTR_KIND_DEREFERENCEABLE = 41,
404404 ATTR_KIND_DEREFERENCEABLE_OR_NULL = 42,
405 ATTR_KIND_CONVERGENT = 43
405 ATTR_KIND_CONVERGENT = 43,
406 ATTR_KIND_SAFESTACK = 44,
406407 };
407408
408409 enum ComdatSelectionKindCodes {
107107 StackProtect, ///< Stack protection.
108108 StackProtectReq, ///< Stack protection required.
109109 StackProtectStrong, ///< Strong Stack protection.
110 SafeStack, ///< Safe Stack protection.
110111 StructRet, ///< Hidden pointer to structure to return
111112 SanitizeAddress, ///< AddressSanitizer is on.
112113 SanitizeThread, ///< ThreadSanitizer is on.
241241 void initializeRegionPrinterPass(PassRegistry&);
242242 void initializeRegionViewerPass(PassRegistry&);
243243 void initializeRewriteStatepointsForGCPass(PassRegistry&);
244 void initializeSafeStackPass(PassRegistry&);
244245 void initializeSCCPPass(PassRegistry&);
245246 void initializeSROAPass(PassRegistry&);
246247 void initializeSROA_DTPass(PassRegistry&);
130130 (void) llvm::createRegionPrinterPass();
131131 (void) llvm::createRegionViewerPass();
132132 (void) llvm::createSCCPPass();
133 (void) llvm::createSafeStackPass();
133134 (void) llvm::createScalarReplAggregatesPass();
134135 (void) llvm::createSingleLoopExtractorPass();
135136 (void) llvm::createStripSymbolsPass();
131131 // checking on loads, stores, and other memory intrinsics.
132132 FunctionPass *createBoundsCheckingPass();
133133
134 /// \brief This pass splits the stack into a safe stack and an unsafe stack to
135 /// protect against stack-based overflow vulnerabilities.
136 FunctionPass *createSafeStackPass();
137
134138 } // End llvm namespace
135139
136140 #endif
627627 KEYWORD(ssp);
628628 KEYWORD(sspreq);
629629 KEYWORD(sspstrong);
630 KEYWORD(safestack);
630631 KEYWORD(sanitize_address);
631632 KEYWORD(sanitize_thread);
632633 KEYWORD(sanitize_memory);
957957 case lltok::kw_ssp: B.addAttribute(Attribute::StackProtect); break;
958958 case lltok::kw_sspreq: B.addAttribute(Attribute::StackProtectReq); break;
959959 case lltok::kw_sspstrong: B.addAttribute(Attribute::StackProtectStrong); break;
960 case lltok::kw_safestack: B.addAttribute(Attribute::SafeStack); break;
960961 case lltok::kw_sanitize_address: B.addAttribute(Attribute::SanitizeAddress); break;
961962 case lltok::kw_sanitize_thread: B.addAttribute(Attribute::SanitizeThread); break;
962963 case lltok::kw_sanitize_memory: B.addAttribute(Attribute::SanitizeMemory); break;
12661267 case lltok::kw_ssp:
12671268 case lltok::kw_sspreq:
12681269 case lltok::kw_sspstrong:
1270 case lltok::kw_safestack:
12691271 case lltok::kw_uwtable:
12701272 HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
12711273 break;
13421344 case lltok::kw_ssp:
13431345 case lltok::kw_sspreq:
13441346 case lltok::kw_sspstrong:
1347 case lltok::kw_safestack:
13451348 case lltok::kw_uwtable:
13461349 HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
13471350 break;
134134 kw_ssp,
135135 kw_sspreq,
136136 kw_sspstrong,
137 kw_safestack,
137138 kw_sret,
138139 kw_sanitize_thread,
139140 kw_sanitize_memory,
11451145 return Attribute::StackProtectReq;
11461146 case bitc::ATTR_KIND_STACK_PROTECT_STRONG:
11471147 return Attribute::StackProtectStrong;
1148 case bitc::ATTR_KIND_SAFESTACK:
1149 return Attribute::SafeStack;
11481150 case bitc::ATTR_KIND_STRUCT_RET:
11491151 return Attribute::StructRet;
11501152 case bitc::ATTR_KIND_SANITIZE_ADDRESS:
231231 return bitc::ATTR_KIND_STACK_PROTECT_REQ;
232232 case Attribute::StackProtectStrong:
233233 return bitc::ATTR_KIND_STACK_PROTECT_STRONG;
234 case Attribute::SafeStack:
235 return bitc::ATTR_KIND_SAFESTACK;
234236 case Attribute::StructRet:
235237 return bitc::ATTR_KIND_STRUCT_RET;
236238 case Attribute::SanitizeAddress:
2121 type = Library
2222 name = CodeGen
2323 parent = Libraries
24 required_libraries = Analysis Core MC Scalar Support Target TransformUtils
24 required_libraries = Analysis Core Instrumentation MC Scalar Support Target TransformUtils
2323 #include "llvm/Support/Debug.h"
2424 #include "llvm/Support/ErrorHandling.h"
2525 #include "llvm/Support/raw_ostream.h"
26 #include "llvm/Transforms/Instrumentation.h"
2627 #include "llvm/Transforms/Scalar.h"
2728 #include "llvm/Transforms/Utils/SymbolRewriter.h"
2829
455456 void TargetPassConfig::addISelPrepare() {
456457 addPreISel();
457458
459 // Add both the safe stack and the stack protection passes: each of them will
460 // only protect functions that have corresponding attributes.
461 addPass(createSafeStackPass());
458462 addPass(createStackProtectorPass(TM));
459463
460464 if (PrintISelInput)
251251 return "sspreq";
252252 if (hasAttribute(Attribute::StackProtectStrong))
253253 return "sspstrong";
254 if (hasAttribute(Attribute::SafeStack))
255 return "safestack";
254256 if (hasAttribute(Attribute::StructRet))
255257 return "sret";
256258 if (hasAttribute(Attribute::SanitizeThread))
436438 case Attribute::NonNull: return 1ULL << 44;
437439 case Attribute::JumpTable: return 1ULL << 45;
438440 case Attribute::Convergent: return 1ULL << 46;
441 case Attribute::SafeStack: return 1ULL << 47;
439442 case Attribute::Dereferenceable:
440443 llvm_unreachable("dereferenceable attribute not supported in raw format");
441444 break;
12501250 I->getKindAsEnum() == Attribute::StackProtect ||
12511251 I->getKindAsEnum() == Attribute::StackProtectReq ||
12521252 I->getKindAsEnum() == Attribute::StackProtectStrong ||
1253 I->getKindAsEnum() == Attribute::SafeStack ||
12531254 I->getKindAsEnum() == Attribute::NoRedZone ||
12541255 I->getKindAsEnum() == Attribute::NoImplicitFloat ||
12551256 I->getKindAsEnum() == Attribute::Naked ||
512512 HANDLE_ATTR(StackProtect);
513513 HANDLE_ATTR(StackProtectReq);
514514 HANDLE_ATTR(StackProtectStrong);
515 HANDLE_ATTR(SafeStack);
515516 HANDLE_ATTR(NoCapture);
516517 HANDLE_ATTR(NoRedZone);
517518 HANDLE_ATTR(NoImplicitFloat);
9292 // clutter to the IR.
9393 AttrBuilder B;
9494 B.addAttribute(Attribute::StackProtect)
95 .addAttribute(Attribute::StackProtectStrong);
95 .addAttribute(Attribute::StackProtectStrong)
96 .addAttribute(Attribute::StackProtectReq);
9697 AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(),
9798 AttributeSet::FunctionIndex,
9899 B);
99100
100 if (Callee->hasFnAttribute(Attribute::StackProtectReq)) {
101 if (Callee->hasFnAttribute(Attribute::SafeStack)) {
102 Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
103 Caller->addFnAttr(Attribute::SafeStack);
104 } else if (Callee->hasFnAttribute(Attribute::StackProtectReq) &&
105 !Caller->hasFnAttribute(Attribute::SafeStack)) {
101106 Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
102107 Caller->addFnAttr(Attribute::StackProtectReq);
103108 } else if (Callee->hasFnAttribute(Attribute::StackProtectStrong) &&
109 !Caller->hasFnAttribute(Attribute::SafeStack) &&
104110 !Caller->hasFnAttribute(Attribute::StackProtectReq)) {
105111 Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
106112 Caller->addFnAttr(Attribute::StackProtectStrong);
107113 } else if (Callee->hasFnAttribute(Attribute::StackProtect) &&
114 !Caller->hasFnAttribute(Attribute::SafeStack) &&
108115 !Caller->hasFnAttribute(Attribute::StackProtectReq) &&
109116 !Caller->hasFnAttribute(Attribute::StackProtectStrong))
110117 Caller->addFnAttr(Attribute::StackProtect);
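The merging logic above implements the rule quoted from LangRef earlier: safestack takes precedence over the ssp/sspstrong/sspreq family when functions are combined by inlining. A minimal hand-written sketch (function names are illustrative and not part of the patch):

; Before inlining: the callee requires the safe stack, the caller only ssp.
define void @callee() safestack {
  ret void
}

define void @caller() ssp {
  call void @callee()
  ret void
}

; After @callee is inlined into @caller, the code above strips the ssp-family
; attributes from the caller, leaving:
;   define void @caller() safestack { ... }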
55 MemorySanitizer.cpp
66 Instrumentation.cpp
77 InstrProfiling.cpp
8 SafeStack.cpp
89 SanitizerCoverage.cpp
910 ThreadSanitizer.cpp
1011
2929 initializeThreadSanitizerPass(Registry);
3030 initializeSanitizerCoverageModulePass(Registry);
3131 initializeDataFlowSanitizerPass(Registry);
32 initializeSafeStackPass(Registry);
3233 }
3334
3435 /// LLVMInitializeInstrumentation - C binding for
0 //===-- SafeStack.cpp - Safe Stack Insertion ------------------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass splits the stack into the safe stack (kept as-is for LLVM backend)
10 // and the unsafe stack (explicitly allocated and managed through the runtime
11 // support library).
12 //
13 // http://clang.llvm.org/docs/SafeStack.html
14 //
15 //===----------------------------------------------------------------------===//
16
17 #include "llvm/Transforms/Instrumentation.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/ADT/Triple.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/Analysis/TargetTransformInfo.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DerivedTypes.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/InstIterator.h"
28 #include "llvm/IR/Instructions.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/Module.h"
33 #include "llvm/Pass.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/Format.h"
37 #include "llvm/Support/MathExtras.h"
38 #include "llvm/Support/raw_os_ostream.h"
39 #include "llvm/Transforms/Utils/Local.h"
40 #include "llvm/Transforms/Utils/ModuleUtils.h"
41
42 using namespace llvm;
43
44 #define DEBUG_TYPE "safestack"
45
46 namespace llvm {
47
48 STATISTIC(NumFunctions, "Total number of functions");
49 STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
50 STATISTIC(NumUnsafeStackRestorePointsFunctions,
51 "Number of functions that use setjmp or exceptions");
52
53 STATISTIC(NumAllocas, "Total number of allocas");
54 STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
55 STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
56 STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
57
58 } // namespace llvm
59
60 namespace {
61
62 /// Check whether a given alloca instruction (AI) should be put on the safe
63 /// stack or not. The function analyzes all uses of AI and checks whether it is
64 /// only accessed in a memory safe way (as decided statically).
65 bool IsSafeStackAlloca(const AllocaInst *AI) {
66 // Go through all uses of this alloca and check whether all accesses to the
67 // allocated object are statically known to be memory safe and, hence, the
68 // object can be placed on the safe stack.
69
70 SmallPtrSet<const Value *, 16> Visited;
71 SmallVector<const Instruction *, 8> WorkList;
72 WorkList.push_back(AI);
73
74 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
75 while (!WorkList.empty()) {
76 const Instruction *V = WorkList.pop_back_val();
77 for (const Use &UI : V->uses()) {
78 auto I = cast<const Instruction>(UI.getUser());
79 assert(V == UI.get());
80
81 switch (I->getOpcode()) {
82 case Instruction::Load:
83 // Loading from a pointer is safe.
84 break;
85 case Instruction::VAArg:
86 // "va-arg" from a pointer is safe.
87 break;
88 case Instruction::Store:
89 if (V == I->getOperand(0))
90 // Stored the pointer - conservatively assume it may be unsafe.
91 return false;
92 // Storing to the pointee is safe.
93 break;
94
95 case Instruction::GetElementPtr:
96 if (!cast<const GetElementPtrInst>(I)->hasAllConstantIndices())
97 // GEP with non-constant indices can lead to memory errors.
98 // This also applies to inbounds GEPs, as the inbounds attribute
99 // represents an assumption that the address is in bounds, rather than
100 // an assertion that it is.
101 return false;
102
103 // We assume that GEP on static alloca with constant indices is safe,
104 // otherwise a compiler would detect it and warn during compilation.
105
106 if (!isa<const ConstantInt>(AI->getArraySize()))
107 // However, if the array size itself is not constant, the access
108 // might still be unsafe at runtime.
109 return false;
110
111 /* fallthrough */
112
113 case Instruction::BitCast:
114 case Instruction::IntToPtr:
115 case Instruction::PHI:
116 case Instruction::PtrToInt:
117 case Instruction::Select:
118 // The object can be safe or not, depending on how the result of the
119 // instruction is used.
120 if (Visited.insert(I).second)
121 WorkList.push_back(cast<const Instruction>(I));
122 break;
123
124 case Instruction::Call:
125 case Instruction::Invoke: {
126 // FIXME: add support for memset and memcpy intrinsics.
127 ImmutableCallSite CS(I);
128
129 // LLVM 'nocapture' attribute is only set for arguments whose address
130 // is not stored, passed around, or used in any other non-trivial way.
131 // We assume that passing a pointer to an object as a 'nocapture'
132 // argument is safe.
133 // FIXME: a more precise solution would require an interprocedural
134 // analysis here, which would look at all uses of an argument inside
135 // the function being called.
136 ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
137 for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
138 if (A->get() == V && !CS.doesNotCapture(A - B))
139 // The parameter is not marked 'nocapture' - unsafe.
140 return false;
141 continue;
142 }
143
144 default:
145 // The object is unsafe if it is used in any other way.
146 return false;
147 }
148 }
149 }
150
151 // All uses of the alloca are safe, we can place it on the safe stack.
152 return true;
153 }
154
155 /// The SafeStack pass splits the stack of each function into the
156 /// safe stack, which is only accessed through memory safe dereferences
157 /// (as determined statically), and the unsafe stack, which contains all
158 /// local variables that are accessed in unsafe ways.
159 class SafeStack : public FunctionPass {
160 const DataLayout *DL;
161
162 Type *StackPtrTy;
163 Type *IntPtrTy;
164 Type *Int32Ty;
165 Type *Int8Ty;
166
167 Constant *UnsafeStackPtr;
168
169 /// Unsafe stack alignment. Each stack frame must ensure that the stack is
170 /// aligned to this value. We need to re-align the unsafe stack if the
171 /// alignment of any object on the stack exceeds this value.
172 ///
173 /// 16 seems like a reasonable upper bound on the alignment of objects that we
174 /// might expect to appear on the stack on most common targets.
175 enum { StackAlignment = 16 };
176
177 /// \brief Build a constant representing a pointer to the unsafe stack
178 /// pointer.
179 Constant *getOrCreateUnsafeStackPtr(Module &M);
180
181 /// \brief Find all static allocas, dynamic allocas, return instructions and
182 /// stack restore points (exception unwind blocks and setjmp calls) in the
183 /// given function and append them to the respective vectors.
184 void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
185 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
186 SmallVectorImpl<ReturnInst *> &Returns,
187 SmallVectorImpl<Instruction *> &StackRestorePoints);
188
189 /// \brief Allocate space for all static allocas in \p StaticAllocas,
190 /// replace allocas with pointers into the unsafe stack and generate code to
191 /// restore the stack pointer before all return instructions in \p Returns.
192 ///
193 /// \returns A pointer to the top of the unsafe stack after all unsafe static
194 /// allocas are allocated.
195 Value *moveStaticAllocasToUnsafeStack(Function &F,
196 ArrayRef<AllocaInst *> StaticAllocas,
197 ArrayRef<ReturnInst *> Returns);
198
199 /// \brief Generate code to restore the stack after all stack restore points
200 /// in \p StackRestorePoints.
201 ///
202 /// \returns A local variable in which to maintain the dynamic top of the
203 /// unsafe stack if needed.
204 AllocaInst *
205 createStackRestorePoints(Function &F,
206 ArrayRef<Instruction *> StackRestorePoints,
207 Value *StaticTop, bool NeedDynamicTop);
208
209 /// \brief Replace all allocas in \p DynamicAllocas with code to allocate
210 /// space dynamically on the unsafe stack and store the dynamic unsafe stack
211 /// top to \p DynamicTop if non-null.
212 void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
213 AllocaInst *DynamicTop,
214 ArrayRef<AllocaInst *> DynamicAllocas);
215
216 public:
217 static char ID; // Pass identification, replacement for typeid.
218 SafeStack() : FunctionPass(ID), DL(nullptr) {
219 initializeSafeStackPass(*PassRegistry::getPassRegistry());
220 }
221
222 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
223 AU.addRequired<AliasAnalysis>();
224 }
225
226 virtual bool doInitialization(Module &M) {
227 DL = &M.getDataLayout();
228
229 StackPtrTy = Type::getInt8PtrTy(M.getContext());
230 IntPtrTy = DL->getIntPtrType(M.getContext());
231 Int32Ty = Type::getInt32Ty(M.getContext());
232 Int8Ty = Type::getInt8Ty(M.getContext());
233
234 UnsafeStackPtr = getOrCreateUnsafeStackPtr(M);
235
236 return false;
237 }
238
239 bool runOnFunction(Function &F);
240
241 }; // class SafeStack
242
243 Constant *SafeStack::getOrCreateUnsafeStackPtr(Module &M) {
244 // The unsafe stack pointer is stored in a global variable with a magic name.
245 const char *kUnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
246
247 auto UnsafeStackPtr =
248 dyn_cast_or_null<GlobalVariable>(M.getNamedValue(kUnsafeStackPtrVar));
249
250 if (!UnsafeStackPtr) {
251 // The global variable is not defined yet, define it ourselves.
252 // We use the initial-exec TLS model because we do not support the variable
253 // living anywhere other than in the main executable.
254 UnsafeStackPtr = new GlobalVariable(
255 /*Module=*/M, /*Type=*/StackPtrTy,
256 /*isConstant=*/false, /*Linkage=*/GlobalValue::ExternalLinkage,
257 /*Initializer=*/0, /*Name=*/kUnsafeStackPtrVar,
258 /*InsertBefore=*/nullptr,
259 /*ThreadLocalMode=*/GlobalValue::InitialExecTLSModel);
260 } else {
261 // The variable exists, check its type and attributes.
262 if (UnsafeStackPtr->getValueType() != StackPtrTy) {
263 report_fatal_error(Twine(kUnsafeStackPtrVar) + " must have void* type");
264 }
265
266 if (!UnsafeStackPtr->isThreadLocal()) {
267 report_fatal_error(Twine(kUnsafeStackPtrVar) + " must be thread-local");
268 }
269 }
270
271 return UnsafeStackPtr;
272 }
273
274 void SafeStack::findInsts(Function &F,
275 SmallVectorImpl<AllocaInst *> &StaticAllocas,
276 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
277 SmallVectorImpl<ReturnInst *> &Returns,
278 SmallVectorImpl<Instruction *> &StackRestorePoints) {
279 for (Instruction &I : inst_range(&F)) {
280 if (auto AI = dyn_cast<AllocaInst>(&I)) {
281 ++NumAllocas;
282
283 if (IsSafeStackAlloca(AI))
284 continue;
285
286 if (AI->isStaticAlloca()) {
287 ++NumUnsafeStaticAllocas;
288 StaticAllocas.push_back(AI);
289 } else {
290 ++NumUnsafeDynamicAllocas;
291 DynamicAllocas.push_back(AI);
292 }
293 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
294 Returns.push_back(RI);
295 } else if (auto CI = dyn_cast<CallInst>(&I)) {
296 // setjmps require stack restore.
297 if (CI->getCalledFunction() && CI->canReturnTwice())
298 StackRestorePoints.push_back(CI);
299 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
300 // Exception landing pads require stack restore.
301 StackRestorePoints.push_back(LP);
302 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
303 if (II->getIntrinsicID() == Intrinsic::gcroot)
304 llvm::report_fatal_error(
305 "gcroot intrinsic not compatible with safestack attribute");
306 }
307 }
308 }
309
310 AllocaInst *
311 SafeStack::createStackRestorePoints(Function &F,
312 ArrayRef<Instruction *> StackRestorePoints,
313 Value *StaticTop, bool NeedDynamicTop) {
314 if (StackRestorePoints.empty())
315 return nullptr;
316
317 IRBuilder<> IRB(StaticTop
318 ? cast<Instruction>(StaticTop)->getNextNode()
319 : (Instruction *)F.getEntryBlock().getFirstInsertionPt());
320
321 // We need the current value of the shadow stack pointer to restore
322 // after longjmp or exception catching.
323
324 // FIXME: On some platforms this could be handled by the longjmp/exception
325 // runtime itself.
326
327 AllocaInst *DynamicTop = nullptr;
328 if (NeedDynamicTop)
329 // If we also have dynamic alloca's, the stack pointer value changes
330 // throughout the function. For now we store it in an alloca.
331 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
332 "unsafe_stack_dynamic_ptr");
333
334 if (!StaticTop)
335 // We need the original unsafe stack pointer value, even if there are
336 // no unsafe static allocas.
337 StaticTop = IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
338
339 if (NeedDynamicTop)
340 IRB.CreateStore(StaticTop, DynamicTop);
341
342 // Restore current stack pointer after longjmp/exception catch.
343 for (Instruction *I : StackRestorePoints) {
344 ++NumUnsafeStackRestorePoints;
345
346 IRB.SetInsertPoint(cast<Instruction>(I->getNextNode()));
347 Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
348 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
349 }
350
351 return DynamicTop;
352 }
353
354 Value *
355 SafeStack::moveStaticAllocasToUnsafeStack(Function &F,
356 ArrayRef<AllocaInst *> StaticAllocas,
357 ArrayRef<ReturnInst *> Returns) {
358 if (StaticAllocas.empty())
359 return nullptr;
360
361 IRBuilder<> IRB(F.getEntryBlock().getFirstInsertionPt());
362 DIBuilder DIB(*F.getParent());
363
364 // We explicitly compute and set the unsafe stack layout for all unsafe
365 // static alloca instructions. We save the unsafe "base pointer" in the
366 // prologue into a local variable and restore it in the epilogue.
367
368 // Load the current stack pointer (we'll also use it as a base pointer).
369 // FIXME: use a dedicated register for it ?
370 Instruction *BasePointer =
371 IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
372 assert(BasePointer->getType() == StackPtrTy);
373
374 for (ReturnInst *RI : Returns) {
375 IRB.SetInsertPoint(RI);
376 IRB.CreateStore(BasePointer, UnsafeStackPtr);
377 }
378
379 // Compute maximum alignment among static objects on the unsafe stack.
380 unsigned MaxAlignment = 0;
381 for (AllocaInst *AI : StaticAllocas) {
382 Type *Ty = AI->getAllocatedType();
383 unsigned Align =
384 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
385 if (Align > MaxAlignment)
386 MaxAlignment = Align;
387 }
388
389 if (MaxAlignment > StackAlignment) {
390 // Re-align the base pointer according to the max requested alignment.
391 assert(isPowerOf2_32(MaxAlignment));
392 IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode()));
393 BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
394 IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
395 ConstantInt::get(IntPtrTy, ~uint64_t(MaxAlignment - 1))),
396 StackPtrTy));
397 }
398
399 // Allocate space for every unsafe static AllocaInst on the unsafe stack.
400 int64_t StaticOffset = 0; // Current stack top.
401 for (AllocaInst *AI : StaticAllocas) {
402 IRB.SetInsertPoint(AI);
403
404 auto CArraySize = cast<ConstantInt>(AI->getArraySize());
405 Type *Ty = AI->getAllocatedType();
406
407 uint64_t Size = DL->getTypeAllocSize(Ty) * CArraySize->getZExtValue();
408 if (Size == 0)
409 Size = 1; // Don't create zero-sized stack objects.
410
411 // Ensure the object is properly aligned.
412 unsigned Align =
413 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
414
415 // Add alignment.
416 // NOTE: we ensure that BasePointer itself is aligned to >= Align.
417 StaticOffset += Size;
418 StaticOffset = RoundUpToAlignment(StaticOffset, Align);
419
420 Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
421 ConstantInt::get(Int32Ty, -StaticOffset));
422 Value *NewAI = IRB.CreateBitCast(Off, AI->getType(), AI->getName());
423 if (AI->hasName() && isa<Instruction>(NewAI))
424 cast<Instruction>(NewAI)->takeName(AI);
425
426 // Replace alloc with the new location.
427 replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
428 AI->replaceAllUsesWith(NewAI);
429 AI->eraseFromParent();
430 }
431
432 // Re-align BasePointer so that our callees would see it aligned as
433 // expected.
434 // FIXME: no need to update BasePointer in leaf functions.
435 StaticOffset = RoundUpToAlignment(StaticOffset, StackAlignment);
436
437 // Update shadow stack pointer in the function epilogue.
438 IRB.SetInsertPoint(cast<Instruction>(BasePointer->getNextNode()));
439
440 Value *StaticTop =
441 IRB.CreateGEP(BasePointer, ConstantInt::get(Int32Ty, -StaticOffset),
442 "unsafe_stack_static_top");
443 IRB.CreateStore(StaticTop, UnsafeStackPtr);
444 return StaticTop;
445 }
446
447 void SafeStack::moveDynamicAllocasToUnsafeStack(
448 Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
449 ArrayRef<AllocaInst *> DynamicAllocas) {
450 DIBuilder DIB(*F.getParent());
451
452 for (AllocaInst *AI : DynamicAllocas) {
453 IRBuilder<> IRB(AI);
454
455 // Compute the new SP value (after AI).
456 Value *ArraySize = AI->getArraySize();
457 if (ArraySize->getType() != IntPtrTy)
458 ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
459
460 Type *Ty = AI->getAllocatedType();
461 uint64_t TySize = DL->getTypeAllocSize(Ty);
462 Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
463
464 Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
465 SP = IRB.CreateSub(SP, Size);
466
467 // Align the SP value to satisfy the AllocaInst, type and stack alignments.
468 unsigned Align = std::max(
469 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()),
470 (unsigned)StackAlignment);
471
472 assert(isPowerOf2_32(Align));
473 Value *NewTop = IRB.CreateIntToPtr(
474 IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
475 StackPtrTy);
476
477 // Save the stack pointer.
478 IRB.CreateStore(NewTop, UnsafeStackPtr);
479 if (DynamicTop)
480 IRB.CreateStore(NewTop, DynamicTop);
481
482 Value *NewAI = IRB.CreateIntToPtr(SP, AI->getType());
483 if (AI->hasName() && isa<Instruction>(NewAI))
484 NewAI->takeName(AI);
485
486 replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
487 AI->replaceAllUsesWith(NewAI);
488 AI->eraseFromParent();
489 }
490
491 if (!DynamicAllocas.empty()) {
492 // Now go through the instructions again, replacing stacksave/stackrestore.
493 for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
494 Instruction *I = &*(It++);
495 auto II = dyn_cast<IntrinsicInst>(I);
496 if (!II)
497 continue;
498
499 if (II->getIntrinsicID() == Intrinsic::stacksave) {
500 IRBuilder<> IRB(II);
501 Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
502 LI->takeName(II);
503 II->replaceAllUsesWith(LI);
504 II->eraseFromParent();
505 } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
506 IRBuilder<> IRB(II);
507 Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
508 SI->takeName(II);
509 assert(II->use_empty());
510 II->eraseFromParent();
511 }
512 }
513 }
514 }
515
516 bool SafeStack::runOnFunction(Function &F) {
517 auto AA = &getAnalysis<AliasAnalysis>();
518
519 DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
520
521 if (!F.hasFnAttribute(Attribute::SafeStack)) {
522 DEBUG(dbgs() << "[SafeStack] safestack is not requested"
523 " for this function\n");
524 return false;
525 }
526
527 if (F.isDeclaration()) {
528 DEBUG(dbgs() << "[SafeStack] function definition"
529 " is not available\n");
530 return false;
531 }
532
533 {
534 // Make sure the regular stack protector won't run on this function
535 // (safestack attribute takes precedence).
536 AttrBuilder B;
537 B.addAttribute(Attribute::StackProtect)
538 .addAttribute(Attribute::StackProtectReq)
539 .addAttribute(Attribute::StackProtectStrong);
540 F.removeAttributes(
541 AttributeSet::FunctionIndex,
542 AttributeSet::get(F.getContext(), AttributeSet::FunctionIndex, B));
543 }
544
545 if (AA->onlyReadsMemory(&F)) {
546 // XXX: we don't protect against information leak attacks for now.
547 DEBUG(dbgs() << "[SafeStack] function only reads memory\n");
548 return false;
549 }
550
551 ++NumFunctions;
552
553 SmallVector<AllocaInst *, 16> StaticAllocas;
554 SmallVector<AllocaInst *, 4> DynamicAllocas;
555 SmallVector<ReturnInst *, 4> Returns;
556
557 // Collect all points where stack gets unwound and needs to be restored
558 // This is only necessary because the runtime (setjmp and unwind code) is
559 // not aware of the unsafe stack and won't unwind/restore it properly.
560 // To work around this problem without changing the runtime, we insert
561 // instrumentation to restore the unsafe stack pointer when necessary.
562 SmallVector<Instruction *, 4> StackRestorePoints;
563
564 // Find all static and dynamic alloca instructions that must be moved to the
565 // unsafe stack, all return instructions and stack restore points.
566 findInsts(F, StaticAllocas, DynamicAllocas, Returns, StackRestorePoints);
567
568 if (StaticAllocas.empty() && DynamicAllocas.empty() &&
569 StackRestorePoints.empty())
570 return false; // Nothing to do in this function.
571
572 if (!StaticAllocas.empty() || !DynamicAllocas.empty())
573 ++NumUnsafeStackFunctions; // This function has the unsafe stack.
574
575 if (!StackRestorePoints.empty())
576 ++NumUnsafeStackRestorePointsFunctions;
577
578 // The top of the unsafe stack after all unsafe static allocas are allocated.
579 Value *StaticTop = moveStaticAllocasToUnsafeStack(F, StaticAllocas, Returns);
580
581 // Safe stack object that stores the current unsafe stack top. It is updated
582 // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
583 // This is only needed if we need to restore stack pointer after longjmp
584 // or exceptions, and we have dynamic allocations.
585 // FIXME: a better alternative might be to store the unsafe stack pointer
586 // before setjmp / invoke instructions.
587 AllocaInst *DynamicTop = createStackRestorePoints(
588 F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
589
590 // Handle dynamic allocas.
591 moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
592 DynamicAllocas);
593
594 DEBUG(dbgs() << "[SafeStack] safestack applied\n");
595 return true;
596 }
597
598 } // end anonymous namespace
599
600 char SafeStack::ID = 0;
601 INITIALIZE_PASS_BEGIN(SafeStack, "safe-stack",
602 "Safe Stack instrumentation pass", false, false)
603 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
604 INITIALIZE_PASS_END(SafeStack, "safe-stack", "Safe Stack instrumentation pass",
605 false, false)
606
607 FunctionPass *llvm::createSafeStackPass() { return new SafeStack(); }
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Address-of local taken (j = &a)
6 ; Requires protector.
7
8 define void @foo() nounwind uwtable safestack {
9 entry:
10 ; CHECK: __safestack_unsafe_stack_ptr
11 %retval = alloca i32, align 4
12 %a = alloca i32, align 4
13 %j = alloca i32*, align 8
14 store i32 0, i32* %retval
15 %0 = load i32, i32* %a, align 4
16 %add = add nsw i32 %0, 1
17 store i32 %add, i32* %a, align 4
18 store i32* %a, i32** %j, align 8
19 ret void
20 }
21
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; array of [16 x i8]
6
7 define void @foo(i8* %a) nounwind uwtable safestack {
8 entry:
9 ; CHECK: %[[USP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
10
11 ; CHECK: %[[USST:.*]] = getelementptr i8, i8* %[[USP]], i32 -16
12
13 ; CHECK: store i8* %[[USST]], i8** @__safestack_unsafe_stack_ptr
14
15 ; CHECK: %[[AADDR:.*]] = alloca i8*, align 8
16 %a.addr = alloca i8*, align 8
17
18 ; CHECK: %[[BUFPTR:.*]] = getelementptr i8, i8* %[[USP]], i32 -16
19 ; CHECK: %[[BUFPTR2:.*]] = bitcast i8* %[[BUFPTR]] to [16 x i8]*
20 %buf = alloca [16 x i8], align 16
21
22 ; CHECK: store i8* {{.*}}, i8** %[[AADDR]], align 8
23 store i8* %a, i8** %a.addr, align 8
24
25 ; CHECK: %[[GEP:.*]] = getelementptr inbounds [16 x i8], [16 x i8]* %[[BUFPTR2]], i32 0, i32 0
26 %gep = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
27
28 ; CHECK: %[[A2:.*]] = load i8*, i8** %[[AADDR]], align 8
29 %a2 = load i8*, i8** %a.addr, align 8
30
31 ; CHECK: call i8* @strcpy(i8* %[[GEP]], i8* %[[A2]])
32 %call = call i8* @strcpy(i8* %gep, i8* %a2)
33
34 ; CHECK: store i8* %[[USP]], i8** @__safestack_unsafe_stack_ptr
35 ret void
36 }
37
38 declare i8* @strcpy(i8*, i8*)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 ; array [4 x i8]
4 ; Requires protector.
5
6 define void @foo(i8* %a) nounwind uwtable safestack {
7 entry:
8 ; CHECK: %[[USP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
9
10 ; CHECK: %[[USST:.*]] = getelementptr i8, i8* %[[USP]], i32 -16
11
12 ; CHECK: store i8* %[[USST]], i8** @__safestack_unsafe_stack_ptr
13
14 ; CHECK: %[[AADDR:.*]] = alloca i8*, align 8
15 %a.addr = alloca i8*, align 8
16
17 ; CHECK: %[[BUFPTR:.*]] = getelementptr i8, i8* %[[USP]], i32 -4
18 ; CHECK: %[[BUFPTR2:.*]] = bitcast i8* %[[BUFPTR]] to [4 x i8]*
19 %buf = alloca [4 x i8], align 1
20
21 ; CHECK: store i8* {{.*}}, i8** %[[AADDR]], align 8
22 store i8* %a, i8** %a.addr, align 8
23
24 ; CHECK: %[[GEP:.*]] = getelementptr inbounds [4 x i8], [4 x i8]* %[[BUFPTR2]], i32 0, i32 0
25 %gep = getelementptr inbounds [4 x i8], [4 x i8]* %buf, i32 0, i32 0
26
27 ; CHECK: %[[A2:.*]] = load i8*, i8** %[[AADDR]], align 8
28 %a2 = load i8*, i8** %a.addr, align 8
29
30 ; CHECK: call i8* @strcpy(i8* %[[GEP]], i8* %[[A2]])
31 %call = call i8* @strcpy(i8* %gep, i8* %a2)
32
33 ; CHECK: store i8* %[[USP]], i8** @__safestack_unsafe_stack_ptr
34 ret void
35 }
36
37 declare i8* @strcpy(i8*, i8*)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; no arrays / no nested arrays
6 ; Requires no protector.
7
8 ; CHECK-LABEL: @foo(
9 define void @foo(i8* %a) nounwind uwtable safestack {
10 entry:
11 ; CHECK-NOT: __safestack_unsafe_stack_ptr
12 %a.addr = alloca i8*, align 8
13 store i8* %a, i8** %a.addr, align 8
14 %0 = load i8*, i8** %a.addr, align 8
15 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* %0)
16 ret void
17 }
18
19 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; PtrToInt/IntToPtr Cast
6 ; Requires no protector.
7
8 ; CHECK-LABEL: @foo(
9 define void @foo() nounwind uwtable safestack {
10 entry:
11 ; CHECK-NOT: __safestack_unsafe_stack_ptr
12 %a = alloca i32, align 4
13 %0 = ptrtoint i32* %a to i64
14 %1 = inttoptr i64 %0 to i32*
15 ret void
16 }
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.nest = type { %struct.pair, %struct.pair }
4 %struct.pair = type { i32, i32 }
5
6 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
7
8 ; Nested structure, no arrays, no address-of expressions.
9 ; Verify that the resulting gep-of-gep does not incorrectly trigger
10 ; a safe stack protector.
11 ; safestack attribute
12 ; Requires no protector.
13 ; CHECK-LABEL: @foo(
14 define void @foo() nounwind uwtable safestack {
15 entry:
16 ; CHECK-NOT: __safestack_unsafe_stack_ptr
17 %c = alloca %struct.nest, align 4
18 %b = getelementptr inbounds %struct.nest, %struct.nest* %c, i32 0, i32 1
19 %_a = getelementptr inbounds %struct.pair, %struct.pair* %b, i32 0, i32 0
20 %0 = load i32, i32* %_a, align 4
21 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32 %0)
22 ret void
23 }
24
25 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %class.A = type { [2 x i8] }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; [2 x i8] in a class
8 ; safestack attribute
9 ; Requires no protector.
10 ; CHECK-LABEL: @foo(
11 define signext i8 @foo() nounwind uwtable safestack {
12 entry:
13 ; CHECK-NOT: __safestack_unsafe_stack_ptr
14 %a = alloca %class.A, align 1
15 %array = getelementptr inbounds %class.A, %class.A* %a, i32 0, i32 0
16 %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
17 %0 = load i8, i8* %arrayidx, align 1
18 ret i8 %0
19 }
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.deep = type { %union.anon }
4 %union.anon = type { %struct.anon }
5 %struct.anon = type { %struct.anon.0 }
6 %struct.anon.0 = type { %union.anon.1 }
7 %union.anon.1 = type { [2 x i8] }
8
9 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
10
11 ; [2 x i8] nested in several layers of structs and unions
12 ; safestack attribute
13 ; Requires no protector.
14 ; CHECK-LABEL: @foo(
15 define signext i8 @foo() nounwind uwtable safestack {
16 entry:
17 ; CHECK-NOT: __safestack_unsafe_stack_ptr
18 %x = alloca %struct.deep, align 1
19 %b = getelementptr inbounds %struct.deep, %struct.deep* %x, i32 0, i32 0
20 %c = bitcast %union.anon* %b to %struct.anon*
21 %d = getelementptr inbounds %struct.anon, %struct.anon* %c, i32 0, i32 0
22 %e = getelementptr inbounds %struct.anon.0, %struct.anon.0* %d, i32 0, i32 0
23 %array = bitcast %union.anon.1* %e to [2 x i8]*
24 %arrayidx = getelementptr inbounds [2 x i8], [2 x i8]* %array, i32 0, i64 0
25 %0 = load i8, i8* %arrayidx, align 1
26 ret i8 %0
27 }
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Variable sized alloca
6 ; safestack attribute
7 ; Requires protector.
8 define void @foo(i32 %n) nounwind uwtable safestack {
9 entry:
10 ; CHECK: __safestack_unsafe_stack_ptr
11 %n.addr = alloca i32, align 4
12 %a = alloca i32*, align 8
13 store i32 %n, i32* %n.addr, align 4
14 %0 = load i32, i32* %n.addr, align 4
15 %conv = sext i32 %0 to i64
16 %1 = alloca i8, i64 %conv
17 %2 = bitcast i8* %1 to i32*
18 store i32* %2, i32** %a, align 8
19 ret void
20 }
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of a pointer
6 ; safestack attribute
7 ; Requires protector.
8 define void @foo() nounwind uwtable safestack {
9 entry:
10 ; CHECK: __safestack_unsafe_stack_ptr
11 %a = alloca i32*, align 8
12 %b = alloca i32**, align 8
13 %call = call i32* @getp()
14 store i32* %call, i32** %a, align 8
15 store i32** %a, i32*** %b, align 8
16 %0 = load i32**, i32*** %b, align 8
17 call void @funcall2(i32** %0)
18 ret void
19 }
20
21 declare void @funcall2(i32**)
22 declare i32* @getp()
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of a local cast to a ptr of a different type
6 ; (e.g., int a; ... ; float *b = &a;)
7 ; safestack attribute
8 ; Requires protector.
9 define void @foo() nounwind uwtable safestack {
10 entry:
11 ; CHECK: __safestack_unsafe_stack_ptr
12 %a = alloca i32, align 4
13 %b = alloca float*, align 8
14 store i32 0, i32* %a, align 4
15 %0 = bitcast i32* %a to float*
16 store float* %0, float** %b, align 8
17 %1 = load float*, float** %b, align 8
18 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), float* %1)
19 ret void
20 }
21
22 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of a local cast to a ptr of a different type (optimized)
6 ; (e.g., int a; ... ; float *b = &a;)
7 ; safestack attribute
8 ; Requires protector.
9 define void @foo() nounwind uwtable safestack {
10 entry:
11 ; CHECK: __safestack_unsafe_stack_ptr
12 %a = alloca i32, align 4
13 store i32 0, i32* %a, align 4
14 %0 = bitcast i32* %a to float*
15 call void @funfloat(float* %0) nounwind
16 ret void
17 }
18
19 declare void @funfloat(float*)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Passing addr-of to function call
6 ; Requires protector.
7 define void @foo() nounwind uwtable safestack {
8 entry:
9 ; CHECK: __safestack_unsafe_stack_ptr
10 %b = alloca i32, align 4
11 call void @funcall(i32* %b) nounwind
12 ret void
13 }
14
15 declare void @funcall(i32*)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of a casted pointer
6 ; safestack attribute
7 ; Requires protector.
8 define void @foo() nounwind uwtable safestack {
9 entry:
10 ; CHECK: __safestack_unsafe_stack_ptr
11 %a = alloca i32*, align 8
12 %b = alloca float**, align 8
13 %call = call i32* @getp()
14 store i32* %call, i32** %a, align 8
15 %0 = bitcast i32** %a to float**
16 store float** %0, float*** %b, align 8
17 %1 = load float**, float*** %b, align 8
18 call void @funfloat2(float** %1)
19 ret void
20 }
21
22 declare void @funfloat2(float**)
23 declare i32* @getp()
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.pair = type { i32, i32 }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; Addr-of struct element, GEP followed by callinst.
8 ; safestack attribute
9 ; Requires protector.
10 define void @foo() nounwind uwtable safestack {
11 entry:
12 ; CHECK: __safestack_unsafe_stack_ptr
13 %c = alloca %struct.pair, align 4
14 %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i64 0, i32 1
15 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32* %y) nounwind
16 ret void
17 }
18
19 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.pair = type { i32, i32 }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; Addr-of a struct element passed into an invoke instruction.
8 ; (GEP followed by an invoke)
9 ; safestack attribute
10 ; Requires protector.
11 define i32 @foo() uwtable safestack {
12 entry:
13 ; CHECK: __safestack_unsafe_stack_ptr
14 %c = alloca %struct.pair, align 4
15 %exn.slot = alloca i8*
16 %ehselector.slot = alloca i32
17 %a = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
18 store i32 0, i32* %a, align 4
19 %a1 = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 0
20 invoke void @_Z3exceptPi(i32* %a1)
21 to label %invoke.cont unwind label %lpad
22
23 invoke.cont:
24 ret i32 0
25
26 lpad:
27 %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
28 catch i8* null
29 ret i32 0
30 }
31
32 declare void @_Z3exceptPi(i32*)
33 declare i32 @__gxx_personality_v0(...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of a local, optimized into a GEP (e.g., &a - 12)
6 ; safestack attribute
7 ; Requires protector.
8 define void @foo() nounwind uwtable safestack {
9 entry:
10 ; CHECK: __safestack_unsafe_stack_ptr
11 %a = alloca i32, align 4
12 %add.ptr5 = getelementptr inbounds i32, i32* %a, i64 -12
13 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), i32* %add.ptr5) nounwind
14 ret void
15 }
16
17 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.pair = type { i32, i32 }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; Addr-of struct element, GEP followed by ptrtoint.
8 ; safestack attribute
9 ; Requires protector.
10 define void @foo() nounwind uwtable safestack {
11 entry:
12 ; CHECK: __safestack_unsafe_stack_ptr
13 %c = alloca %struct.pair, align 4
14 %b = alloca i32*, align 8
15 %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
16 %0 = ptrtoint i32* %y to i64
17 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i64 %0)
18 ret void
19 }
20
21 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.pair = type { i32, i32 }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; Addr-of struct element. (GEP followed by store).
8 ; safestack attribute
9 ; Requires protector.
10 define void @foo() nounwind uwtable safestack {
11 entry:
12 ; CHECK: __safestack_unsafe_stack_ptr
13 %c = alloca %struct.pair, align 4
14 %b = alloca i32*, align 8
15 %y = getelementptr inbounds %struct.pair, %struct.pair* %c, i32 0, i32 1
16 store i32* %y, i32** %b, align 8
17 %0 = load i32*, i32** %b, align 8
18 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i32* %0)
19 ret void
20 }
21
22 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of in phi instruction
6 ; Requires protector.
7 define void @foo() nounwind uwtable safestack {
8 entry:
9 ; CHECK: __safestack_unsafe_stack_ptr
10 %x = alloca double, align 8
11 %call = call double @testi_aux() nounwind
12 store double %call, double* %x, align 8
13 %cmp = fcmp ogt double %call, 3.140000e+00
14 br i1 %cmp, label %if.then, label %if.else
15
16 if.then: ; preds = %entry
17 %call1 = call double @testi_aux() nounwind
18 store double %call1, double* %x, align 8
19 br label %if.end4
20
21 if.else: ; preds = %entry
22 %cmp2 = fcmp ogt double %call, 1.000000e+00
23 br i1 %cmp2, label %if.then3, label %if.end4
24
25 if.then3: ; preds = %if.else
26 br label %if.end4
27
28 if.end4: ; preds = %if.else, %if.then3, %if.then
29 %y.0 = phi double* [ null, %if.then ], [ %x, %if.then3 ], [ null, %if.else ]
30 %call5 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), double* %y.0) nounwind
31 ret void
32 }
33
34 declare double @testi_aux()
35 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of in select instruction
6 ; safestack attribute
7 ; Requires protector.
8 define void @foo() nounwind uwtable safestack {
9 entry:
10 ; CHECK: __safestack_unsafe_stack_ptr
11 %x = alloca double, align 8
12 %call = call double @testi_aux() nounwind
13 store double %call, double* %x, align 8
14 %cmp2 = fcmp ogt double %call, 0.000000e+00
15 %y.1 = select i1 %cmp2, double* %x, double* null
16 %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), double* %y.1)
17 ret void
18 }
19
20 declare double @testi_aux()
21 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.vec = type { <4 x i32> }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; Addr-of a vector nested in a struct
8 ; safestack attribute
9 ; Requires protector.
10 define void @foo() nounwind uwtable safestack {
11 entry:
12 ; CHECK: __safestack_unsafe_stack_ptr
13 %c = alloca %struct.vec, align 16
14 %y = getelementptr inbounds %struct.vec, %struct.vec* %c, i64 0, i32 0
15 %add.ptr = getelementptr inbounds <4 x i32>, <4 x i32>* %y, i64 -12
16 %call = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i64 0, i64 0), <4 x i32>* %add.ptr) nounwind
17 ret void
18 }
19
20 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; Addr-of a variable passed into an invoke instruction.
6 ; safestack attribute
7 ; Requires protector and stack restore after landing pad.
8 define i32 @foo() uwtable safestack {
9 entry:
10 ; CHECK: %[[SP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
11 ; CHECK: %[[STATICTOP:.*]] = getelementptr i8, i8* %[[SP]], i32 -16
12 %a = alloca i32, align 4
13 %exn.slot = alloca i8*
14 %ehselector.slot = alloca i32
15 store i32 0, i32* %a, align 4
16 invoke void @_Z3exceptPi(i32* %a)
17 to label %invoke.cont unwind label %lpad
18
19 invoke.cont:
20 ret i32 0
21
22 lpad:
23 ; CHECK: landingpad
24 ; CHECK-NEXT: catch
25 %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
26 catch i8* null
27 ; CHECK-NEXT: store i8* %[[STATICTOP]], i8** @__safestack_unsafe_stack_ptr
28 ret i32 0
29 }
30
31 declare void @_Z3exceptPi(i32*)
32 declare i32 @__gxx_personality_v0(...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
4
5 ; no safestack attribute
6 ; Requires no protector.
7
8 ; CHECK: @foo
9 define void @foo(i8* %a) nounwind uwtable {
10 entry:
11 ; CHECK-NOT: __safestack_unsafe_stack_ptr
12 %a.addr = alloca i8*, align 8
13 %buf = alloca [16 x i8], align 16
14 store i8* %a, i8** %a.addr, align 8
15 %arraydecay = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
16 %0 = load i8*, i8** %a.addr, align 8
17 %call = call i8* @strcpy(i8* %arraydecay, i8* %0)
18 %arraydecay1 = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i32 0, i32 0
19 %call2 = call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @.str, i32 0, i32 0), i8* %arraydecay1)
20 ret void
21 }
22
23 declare i8* @strcpy(i8*, i8*)
24 declare i32 @printf(i8*, ...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.small = type { i8 }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; Address-of a structure taken in a function with a loop where
8 ; the alloca is an incoming value to a PHI node and a use of that PHI
9 ; node is also an incoming value.
10 ; Verify that the address-of analysis does not get stuck in infinite
11 ; recursion when chasing the alloca through the PHI nodes.
12 ; Requires protector.
13 define i32 @foo(i32 %arg) nounwind uwtable safestack {
14 bb:
15 ; CHECK: __safestack_unsafe_stack_ptr
16 %tmp = alloca %struct.small*, align 8
17 %tmp1 = call i32 (...) @dummy(%struct.small** %tmp) nounwind
18 %tmp2 = load %struct.small*, %struct.small** %tmp, align 8
19 %tmp3 = ptrtoint %struct.small* %tmp2 to i64
20 %tmp4 = trunc i64 %tmp3 to i32
21 %tmp5 = icmp sgt i32 %tmp4, 0
22 br i1 %tmp5, label %bb6, label %bb21
23
24 bb6: ; preds = %bb17, %bb
25 %tmp7 = phi %struct.small* [ %tmp19, %bb17 ], [ %tmp2, %bb ]
26 %tmp8 = phi i64 [ %tmp20, %bb17 ], [ 1, %bb ]
27 %tmp9 = phi i32 [ %tmp14, %bb17 ], [ %tmp1, %bb ]
28 %tmp10 = getelementptr inbounds %struct.small, %struct.small* %tmp7, i64 0, i32 0
29 %tmp11 = load i8, i8* %tmp10, align 1
30 %tmp12 = icmp eq i8 %tmp11, 1
31 %tmp13 = add nsw i32 %tmp9, 8
32 %tmp14 = select i1 %tmp12, i32 %tmp13, i32 %tmp9
33 %tmp15 = trunc i64 %tmp8 to i32
34 %tmp16 = icmp eq i32 %tmp15, %tmp4
35 br i1 %tmp16, label %bb21, label %bb17
36
37 bb17: ; preds = %bb6
38 %tmp18 = getelementptr inbounds %struct.small*, %struct.small** %tmp, i64 %tmp8
39 %tmp19 = load %struct.small*, %struct.small** %tmp18, align 8
40 %tmp20 = add i64 %tmp8, 1
41 br label %bb6
42
43 bb21: ; preds = %bb6, %bb
44 %tmp22 = phi i32 [ %tmp1, %bb ], [ %tmp14, %bb6 ]
45 %tmp23 = call i32 (...) @dummy(i32 %tmp22) nounwind
46 ret i32 undef
47 }
48
49 declare i32 @dummy(...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.__jmp_buf_tag = type { [8 x i64], i32, %struct.__sigset_t }
4 %struct.__sigset_t = type { [16 x i64] }
5
6 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
7 @buf = internal global [1 x %struct.__jmp_buf_tag] zeroinitializer, align 16
8
9 ; setjmp/longjmp test.
10 ; Requires protector.
11 define i32 @foo() nounwind uwtable safestack {
12 entry:
13 ; CHECK: %[[SP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
14 ; CHECK: %[[STATICTOP:.*]] = getelementptr i8, i8* %[[SP]], i32 -16
15 %retval = alloca i32, align 4
16 %x = alloca i32, align 4
17 store i32 0, i32* %retval
18 store i32 42, i32* %x, align 4
19 %call = call i32 @_setjmp(%struct.__jmp_buf_tag* getelementptr inbounds ([1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* @buf, i32 0, i32 0)) returns_twice
20 ; CHECK: setjmp
21 ; CHECK-NEXT: store i8* %[[STATICTOP]], i8** @__safestack_unsafe_stack_ptr
22 %tobool = icmp ne i32 %call, 0
23 br i1 %tobool, label %if.else, label %if.then
24 if.then: ; preds = %entry
25 call void @funcall(i32* %x)
26 br label %if.end
27 if.else: ; preds = %entry
28 call i32 (...) @dummy()
29 br label %if.end
30 if.end: ; preds = %if.else, %if.then
31 ret i32 0
32 }
33
34 declare i32 @_setjmp(%struct.__jmp_buf_tag*)
35 declare void @funcall(i32*)
36 declare i32 @dummy(...)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.__jmp_buf_tag = type { [8 x i64], i32, %struct.__sigset_t }
4 %struct.__sigset_t = type { [16 x i64] }
5
6 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
7 @buf = internal global [1 x %struct.__jmp_buf_tag] zeroinitializer, align 16
8
9 ; setjmp/longjmp test with dynamically sized array.
10 ; Requires protector.
11 ; CHECK: @foo(i32 %[[ARG:.*]])
12 define i32 @foo(i32 %size) nounwind uwtable safestack {
13 entry:
14 ; CHECK: %[[DYNPTR:.*]] = alloca i8*
15 ; CHECK-NEXT: %[[SP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
16 ; CHECK-NEXT: store i8* %[[SP]], i8** %[[DYNPTR]]
17
18 ; CHECK-NEXT: %[[ZEXT:.*]] = zext i32 %[[ARG]] to i64
19 ; CHECK-NEXT: %[[MUL:.*]] = mul i64 %[[ZEXT]], 4
20 ; CHECK-NEXT: %[[SP2:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
21 ; CHECK-NEXT: %[[PTRTOINT:.*]] = ptrtoint i8* %[[SP2]] to i64
22 ; CHECK-NEXT: %[[SUB:.*]] = sub i64 %[[PTRTOINT]], %[[MUL]]
23 ; CHECK-NEXT: %[[AND:.*]] = and i64 %[[SUB]], -16
24 ; CHECK-NEXT: %[[INTTOPTR:.*]] = inttoptr i64 %[[AND]] to i8*
25 ; CHECK-NEXT: store i8* %[[INTTOPTR]], i8** @__safestack_unsafe_stack_ptr
26 ; CHECK-NEXT: store i8* %[[INTTOPTR]], i8** %unsafe_stack_dynamic_ptr
27 ; CHECK-NEXT: %[[ALLOCA:.*]] = inttoptr i64 %[[SUB]] to i32*
28 %a = alloca i32, i32 %size
29
30 ; CHECK: setjmp
31 ; CHECK-NEXT: %[[LOAD:.*]] = load i8*, i8** %[[DYNPTR]]
32 ; CHECK-NEXT: store i8* %[[LOAD]], i8** @__safestack_unsafe_stack_ptr
33 %call = call i32 @_setjmp(%struct.__jmp_buf_tag* getelementptr inbounds ([1 x %struct.__jmp_buf_tag], [1 x %struct.__jmp_buf_tag]* @buf, i32 0, i32 0)) returns_twice
34
35 ; CHECK: call void @funcall(i32* %[[ALLOCA]])
36 call void @funcall(i32* %a)
37 ret i32 0
38 }
39
40 declare i32 @_setjmp(%struct.__jmp_buf_tag*)
41 declare void @funcall(i32*)
0 ; RUN: opt -safe-stack -S -mtriple=i386-pc-linux-gnu < %s -o - | FileCheck %s
1 ; RUN: opt -safe-stack -S -mtriple=x86_64-pc-linux-gnu < %s -o - | FileCheck %s
2
3 %struct.foo = type { [16 x i8] }
4
5 @.str = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
6
7 ; struct { [16 x i8] }
8
9 define void @foo(i8* %a) nounwind uwtable safestack {
10 entry:
11 ; CHECK: %[[USP:.*]] = load i8*, i8** @__safestack_unsafe_stack_ptr
12
13 ; CHECK: %[[USST:.*]] = getelementptr i8, i8* %[[USP]], i32 -16
14
15 ; CHECK: store i8* %[[USST]], i8** @__safestack_unsafe_stack_ptr
16
17 ; CHECK: %[[AADDR:.*]] = alloca i8*, align 8
18 %a.addr = alloca i8*, align 8
19
20 ; CHECK: %[[BUFPTR:.*]] = getelementptr i8, i8* %[[USP]], i32 -16
21 ; CHECK: %[[BUFPTR2:.*]] = bitcast i8* %[[BUFPTR]] to %struct.foo*
22 %buf = alloca %struct.foo, align 1
23
24 ; CHECK: store i8* {{.*}}, i8** %[[AADDR]], align 8
25 store i8* %a, i8** %a.addr, align 8
26
27 ; CHECK: %[[GEP:.*]] = getelementptr inbounds %struct.foo, %struct.foo* %[[BUFPTR2]], i32 0, i32 0, i32 0
28 %gep = getelementptr inbounds %struct.foo, %struct.foo* %buf, i32 0, i32 0, i32 0
29
30 ; CHECK: %[[A:.*]] = load i8*, i8** %[[AADDR]], align 8
31 %a2 = load i8*, i8** %a.addr, align 8
32
33 ; CHECK: call i8* @strcpy(i8* %[[GEP]], i8* %[[A]])
34 %call = call i8* @strcpy(i8* %gep, i8* %a2)
35
36 ; CHECK: store i8* %[[USP]], i8** @__safestack_unsafe_stack_ptr
37 ret void
38 }
39
40 declare i8* @strcpy(i8*, i8*)