llvm.org GIT mirror llvm / 5b6af71
[stackprotector] Use analysis from the StackProtector pass for stack layout in PEI and LocalStackSlot passes.

This changes the MachineFrameInfo API to use the new SSPLayoutKind information produced by the StackProtector pass (instead of a boolean flag) and updates a few pass dependencies (to preserve the SSP analysis).

The stack layout follows the same approach used prior to this change - i.e., only LargeArray stack objects will be placed near the canary and everything else will be laid out normally. After this change, structures containing large arrays will also be placed near the canary - a case previously missed by the old implementation.

Out-of-tree targets will need to update their usage of MachineFrameInfo::CreateStackObject to remove the MayNeedSP argument.

The next patch will implement the rules for sspstrong and sspreq; the end goal is to support ssp-strong stack layout rules. WIP.

Differential Revision: http://llvm-reviews.chandlerc.com/D2158

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@197653 91177308-0d34-0410-b5e6-96231b3b80d8

Josh Magee
18 changed file(s) with 562 addition(s) and 57 deletion(s).
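For out-of-tree targets, the migration is mechanical: drop the MayNeedSP flag from CreateStackObject calls and record the originating alloca instead. A minimal sketch of an updated call site; the helper function and its parameters are hypothetical, not part of this patch:

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical out-of-tree call site, updated for this change.
static int createFrameObjectFor(MachineFrameInfo *MFI, const AllocaInst *AI,
                                uint64_t Size, unsigned Align) {
  // Before: MFI->CreateStackObject(Size, Align, /*isSS=*/false,
  //                                /*MayNeedSP=*/false, AI);
  // After: the MayNeedSP flag is gone; placement near the canary is now
  // decided by the StackProtector analysis via the recorded alloca.
  return MFI->CreateStackObject(Size, Align, /*isSS=*/false, AI);
}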
100100 // cannot alias any other memory objects.
101101 bool isSpillSlot;
102102
103 // MayNeedSP - If true the stack object triggered the creation of the stack
104 // protector. We should allocate this object right after the stack
105 // protector.
106 bool MayNeedSP;
107
108103 /// Alloca - If this stack object is originated from an Alloca instruction
109104 /// this value saves the original IR allocation. Can be NULL.
110105 const AllocaInst *Alloca;
114109 bool PreAllocated;
115110
116111 StackObject(uint64_t Sz, unsigned Al, int64_t SP, bool IM,
117 bool isSS, bool NSP, const AllocaInst *Val)
112 bool isSS, const AllocaInst *Val)
118113 : SPOffset(SP), Size(Sz), Alignment(Al), isImmutable(IM),
119 isSpillSlot(isSS), MayNeedSP(NSP), Alloca(Val), PreAllocated(false) {}
114 isSpillSlot(isSS), Alloca(Val), PreAllocated(false) {}
120115 };
121116
122117 const TargetMachine &TM;
405400 return Objects[ObjectIdx+NumFixedObjects].Alloca;
406401 }
407402
408 /// NeedsStackProtector - Returns true if the object may need stack
409 /// protectors.
410 bool MayNeedStackProtector(int ObjectIdx) const {
411 assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
412 "Invalid Object Idx!");
413 return Objects[ObjectIdx+NumFixedObjects].MayNeedSP;
414 }
415
416403 /// getObjectOffset - Return the assigned stack offset of the specified object
417404 /// from the incoming stack pointer.
418405 ///
530517 /// a nonnegative identifier to represent it.
531518 ///
532519 int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
533 bool MayNeedSP = false, const AllocaInst *Alloca = 0);
520 const AllocaInst *Alloca = 0);
534521
535522 /// CreateSpillStackObject - Create a new statically sized stack object that
536523 /// represents a spill slot, returning a nonnegative identifier to represent
550537 /// variable sized object is created, whether or not the index returned is
551538 /// actually used.
552539 ///
553 int CreateVariableSizedObject(unsigned Alignment);
540 int CreateVariableSizedObject(unsigned Alignment, const AllocaInst *Alloca);
554541
555542 /// getCalleeSavedInfo - Returns a reference to call saved info vector for the
556543 /// current function.
1919 #include "llvm/ADT/SmallPtrSet.h"
2020 #include "llvm/ADT/Triple.h"
2121 #include "llvm/ADT/ValueMap.h"
22 #include "llvm/Analysis/Dominators.h"
2223 #include "llvm/Pass.h"
2324 #include "llvm/Target/TargetLowering.h"
2425
2526 namespace llvm {
26 class DominatorTree;
2727 class Function;
2828 class Module;
2929 class PHINode;
1616 #define DEBUG_TYPE "localstackalloc"
1717 #include "llvm/CodeGen/Passes.h"
1818 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SetVector.h"
1920 #include "llvm/ADT/SmallSet.h"
2021 #include "llvm/ADT/Statistic.h"
2122 #include "llvm/CodeGen/MachineFrameInfo.h"
2223 #include "llvm/CodeGen/MachineFunction.h"
2324 #include "llvm/CodeGen/MachineFunctionPass.h"
2425 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/StackProtector.h"
2527 #include "llvm/IR/Constants.h"
2628 #include "llvm/IR/DerivedTypes.h"
2729 #include "llvm/IR/Instructions.h"
5961
6062 class LocalStackSlotPass: public MachineFunctionPass {
6163 SmallVector<int64_t, 16> LocalOffsets;
64 /// StackObjSet - A set of stack object indexes
65 typedef SmallSetVector<int, 8> StackObjSet;
6266
6367 void AdjustStackOffset(MachineFrameInfo *MFI, int FrameIdx, int64_t &Offset,
6468 bool StackGrowsDown, unsigned &MaxAlign);
69 void AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
70 SmallSet<int, 16> &ProtectedObjs,
71 MachineFrameInfo *MFI, bool StackGrowsDown,
72 int64_t &Offset, unsigned &MaxAlign);
6573 void calculateFrameObjectOffsets(MachineFunction &Fn);
6674 bool insertFrameReferenceRegisters(MachineFunction &Fn);
6775 public:
6876 static char ID; // Pass identification, replacement for typeid
69 explicit LocalStackSlotPass() : MachineFunctionPass(ID) { }
77 explicit LocalStackSlotPass() : MachineFunctionPass(ID) {
78 initializeLocalStackSlotPassPass(*PassRegistry::getPassRegistry());
79 }
7080 bool runOnMachineFunction(MachineFunction &MF);
7181
7282 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
7383 AU.setPreservesCFG();
84 AU.addRequired<StackProtector>();
7485 MachineFunctionPass::getAnalysisUsage(AU);
7586 }
7687
8091
8192 char LocalStackSlotPass::ID = 0;
8293 char &llvm::LocalStackSlotAllocationID = LocalStackSlotPass::ID;
83 INITIALIZE_PASS(LocalStackSlotPass, "localstackalloc",
84 "Local Stack Slot Allocation", false, false)
94 INITIALIZE_PASS_BEGIN(LocalStackSlotPass, "localstackalloc",
95 "Local Stack Slot Allocation", false, false)
96 INITIALIZE_PASS_DEPENDENCY(StackProtector)
97 INITIALIZE_PASS_END(LocalStackSlotPass, "localstackalloc",
98 "Local Stack Slot Allocation", false, false)
99
85100
86101 bool LocalStackSlotPass::runOnMachineFunction(MachineFunction &MF) {
87102 MachineFrameInfo *MFI = MF.getFrameInfo();
142157 Offset += MFI->getObjectSize(FrameIdx);
143158
144159 ++NumAllocations;
160 }
161
162 /// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
163 /// those required to be close to the Stack Protector) to stack offsets.
164 void LocalStackSlotPass::AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
165 SmallSet<int, 16> &ProtectedObjs,
166 MachineFrameInfo *MFI,
167 bool StackGrowsDown, int64_t &Offset,
168 unsigned &MaxAlign) {
169
170 for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
171 E = UnassignedObjs.end(); I != E; ++I) {
172 int i = *I;
173 AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
174 ProtectedObjs.insert(i);
175 }
145176 }
146177
147178 /// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
155186 TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
156187 int64_t Offset = 0;
157188 unsigned MaxAlign = 0;
189 StackProtector *SP = &getAnalysis<StackProtector>();
158190
159191 // Make sure that the stack protector comes before the local variables on the
160192 // stack.
161 SmallSet<int, 16> LargeStackObjs;
193 SmallSet<int, 16> ProtectedObjs;
162194 if (MFI->getStackProtectorIndex() >= 0) {
195 StackObjSet LargeArrayObjs;
163196 AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), Offset,
164197 StackGrowsDown, MaxAlign);
165198
169202 continue;
170203 if (MFI->getStackProtectorIndex() == (int)i)
171204 continue;
172 if (!MFI->MayNeedStackProtector(i))
173 continue;
174
175 AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
176 LargeStackObjs.insert(i);
177 }
205
206 switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
207 case StackProtector::SSPLK_None:
208 case StackProtector::SSPLK_SmallArray:
209 case StackProtector::SSPLK_AddrOf:
210 continue;
211 case StackProtector::SSPLK_LargeArray:
212 LargeArrayObjs.insert(i);
213 continue;
214 }
215 llvm_unreachable("Unexpected SSPLayoutKind.");
216 }
217
218 AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
219 Offset, MaxAlign);
178220 }
179221
180222 // Then assign frame offsets to stack objects that are not used to spill
184226 continue;
185227 if (MFI->getStackProtectorIndex() == (int)i)
186228 continue;
187 if (LargeStackObjs.count(i))
229 if (ProtectedObjs.count(i))
188230 continue;
189231
190232 AdjustStackOffset(MFI, i, Offset, StackGrowsDown, MaxAlign);
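Taken together, the hunks above show roughly the pattern a machine pass (in-tree or out-of-tree) follows to consume the new analysis: require StackProtector in getAnalysisUsage, recover each frame object's originating alloca, and switch on its SSPLayoutKind. A condensed, hypothetical sketch of such a consumer; this is not code from the patch:

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/StackProtector.h"
using namespace llvm;

namespace {
struct LayoutSketch : public MachineFunctionPass {
  static char ID;
  LayoutSketch() : MachineFunctionPass(ID) {}

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<StackProtector>(); // make the IR-level analysis available
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  virtual bool runOnMachineFunction(MachineFunction &MF) {
    MachineFrameInfo *MFI = MF.getFrameInfo();
    StackProtector *SP = &getAnalysis<StackProtector>();
    for (int i = 0, e = MFI->getObjectIndexEnd(); i != e; ++i) {
      const AllocaInst *AI = MFI->getObjectAllocation(i);
      if (!AI)
        continue; // spill slots and similar objects carry no alloca
      if (SP->getSSPLayout(AI) == StackProtector::SSPLK_LargeArray) {
        // ... assign this object an offset adjacent to the canary ...
      }
    }
    return false; // sketch only inspects the frame; nothing is modified
  }
};
}
char LayoutSketch::ID = 0;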
498498 /// a nonnegative identifier to represent it.
499499 ///
500500 int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
501 bool isSS, bool MayNeedSP, const AllocaInst *Alloca) {
501 bool isSS, const AllocaInst *Alloca) {
502502 assert(Size != 0 && "Cannot allocate zero size stack objects!");
503503 Alignment =
504504 clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
505505 !RealignOption,
506506 Alignment, getFrameLowering()->getStackAlignment());
507 Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP,
508 Alloca));
507 Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca));
509508 int Index = (int)Objects.size() - NumFixedObjects - 1;
510509 assert(Index >= 0 && "Bad frame index!");
511510 ensureMaxAlignment(Alignment);
522521 clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
523522 !RealignOption,
524523 Alignment, getFrameLowering()->getStackAlignment());
525 CreateStackObject(Size, Alignment, true, false);
524 CreateStackObject(Size, Alignment, true);
526525 int Index = (int)Objects.size() - NumFixedObjects - 1;
527526 ensureMaxAlignment(Alignment);
528527 return Index;
533532 /// variable sized object is created, whether or not the index returned is
534533 /// actually used.
535534 ///
536 int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment) {
535 int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
536 const AllocaInst *Alloca) {
537537 HasVarSizedObjects = true;
538538 Alignment =
539539 clampStackAlignment(!getFrameLowering()->isStackRealignable() ||
540540 !RealignOption,
541541 Alignment, getFrameLowering()->getStackAlignment());
542 Objects.push_back(StackObject(0, Alignment, 0, false, false, true, 0));
542 Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca));
543543 ensureMaxAlignment(Alignment);
544544 return (int)Objects.size()-NumFixedObjects-1;
545545 }
564564 Align, getFrameLowering()->getStackAlignment());
565565 Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
566566 /*isSS*/ false,
567 /*NeedSP*/ false,
568567 /*Alloca*/ 0));
569568 return -++NumFixedObjects;
570569 }
5050 AU.addPreserved("domfrontier");
5151 AU.addPreserved("loops");
5252 AU.addPreserved("lda");
53 AU.addPreserved("stack-protector");
5354
5455 FunctionPass::getAnalysisUsage(AU);
5556 }
426426 /// Add common passes that perform LLVM IR to IR transforms in preparation for
427427 /// instruction selection.
428428 void TargetPassConfig::addISelPrepare() {
429 addPreISel();
430
429431 addPass(createStackProtectorPass(TM));
430
431 addPreISel();
432432
433433 if (PrintISelInput)
434434 addPass(createPrintFunctionPass("\n\n"
1919 #include "PrologEpilogInserter.h"
2020 #include "llvm/ADT/IndexedMap.h"
2121 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
2223 #include "llvm/ADT/SmallSet.h"
2324 #include "llvm/ADT/Statistic.h"
2425 #include "llvm/CodeGen/MachineDominators.h"
2930 #include "llvm/CodeGen/MachineRegisterInfo.h"
3031 #include "llvm/CodeGen/RegisterScavenging.h"
3132 #include "llvm/IR/DiagnosticInfo.h"
33 #include "llvm/CodeGen/StackProtector.h"
3234 #include "llvm/IR/InlineAsm.h"
3335 #include "llvm/IR/LLVMContext.h"
3436 #include "llvm/Support/CommandLine.h"
5557 "Prologue/Epilogue Insertion", false, false)
5658 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
5759 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
60 INITIALIZE_PASS_DEPENDENCY(StackProtector)
5861 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
5962 INITIALIZE_PASS_END(PEI, "prologepilog",
6063 "Prologue/Epilogue Insertion & Frame Finalization",
6871 AU.setPreservesCFG();
6972 AU.addPreserved<MachineLoopInfo>();
7073 AU.addPreserved<MachineDominatorTree>();
74 AU.addRequired<StackProtector>();
7175 AU.addRequired<TargetPassConfig>();
7276 MachineFunctionPass::getAnalysisUsage(AU);
7377 }
9599
96100 return;
97101 }
102
103 /// StackObjSet - A set of stack object indexes
104 typedef SmallSetVector<int, 8> StackObjSet;
98105
99106 /// runOnMachineFunction - Insert prolog/epilog code and replace abstract
100107 /// frame indexes with appropriate references.
411418 }
412419 }
413420
421 /// AssignProtectedObjSet - Helper function to assign large stack objects (i.e.,
422 /// those required to be close to the Stack Protector) to stack offsets.
423 static void
424 AssignProtectedObjSet(const StackObjSet &UnassignedObjs,
425 SmallSet<int, 16> &ProtectedObjs,
426 MachineFrameInfo *MFI, bool StackGrowsDown,
427 int64_t &Offset, unsigned &MaxAlign) {
428
429 for (StackObjSet::const_iterator I = UnassignedObjs.begin(),
430 E = UnassignedObjs.end(); I != E; ++I) {
431 int i = *I;
432 AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
433 ProtectedObjs.insert(i);
434 }
435 }
436
414437 /// calculateFrameObjectOffsets - Calculate actual frame offsets for all of the
415438 /// abstract stack objects.
416439 ///
417440 void PEI::calculateFrameObjectOffsets(MachineFunction &Fn) {
418441 const TargetFrameLowering &TFI = *Fn.getTarget().getFrameLowering();
442 StackProtector *SP = &getAnalysis<StackProtector>();
419443
420444 bool StackGrowsDown =
421445 TFI.getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
525549
526550 // Make sure that the stack protector comes before the local variables on the
527551 // stack.
528 SmallSet<int, 16> LargeStackObjs;
552 SmallSet<int, 16> ProtectedObjs;
529553 if (MFI->getStackProtectorIndex() >= 0) {
554 StackObjSet LargeArrayObjs;
530555 AdjustStackOffset(MFI, MFI->getStackProtectorIndex(), StackGrowsDown,
531556 Offset, MaxAlign);
532557
543568 continue;
544569 if (MFI->getStackProtectorIndex() == (int)i)
545570 continue;
546 if (!MFI->MayNeedStackProtector(i))
571
572 switch (SP->getSSPLayout(MFI->getObjectAllocation(i))) {
573 case StackProtector::SSPLK_None:
574 case StackProtector::SSPLK_SmallArray:
575 case StackProtector::SSPLK_AddrOf:
547576 continue;
548
549 AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
550 LargeStackObjs.insert(i);
551 }
577 case StackProtector::SSPLK_LargeArray:
578 LargeArrayObjs.insert(i);
579 continue;
580 }
581 llvm_unreachable("Unexpected SSPLayoutKind.");
582 }
583
584 AssignProtectedObjSet(LargeArrayObjs, ProtectedObjs, MFI, StackGrowsDown,
585 Offset, MaxAlign);
552586 }
553587
554588 // Then assign frame offsets to stack objects that are not used to spill
565599 continue;
566600 if (MFI->getStackProtectorIndex() == (int)i)
567601 continue;
568 if (LargeStackObjs.count(i))
602 if (ProtectedObjs.count(i))
569603 continue;
570604
571605 AdjustStackOffset(MFI, i, StackGrowsDown, Offset, MaxAlign);
8484 TySize *= CUI->getZExtValue(); // Get total allocated size.
8585 if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
8686
87 // The object may need to be placed onto the stack near the stack
88 // protector if one exists. Determine here if this object is a suitable
89 // candidate. I.e., it would trigger the creation of a stack protector.
90 bool MayNeedSP =
91 (AI->isArrayAllocation() ||
92 (TySize >= 8 && isa<ArrayType>(Ty) &&
93 cast<ArrayType>(Ty)->getElementType()->isIntegerTy(8)));
9487 StaticAllocaMap[AI] =
95 MF->getFrameInfo()->CreateStackObject(TySize, Align, false,
96 MayNeedSP, AI);
88 MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
9789 }
9890
9991 for (; BB != EB; ++BB)
33793379
33803380 // Inform the Frame Information that we have just allocated a variable-sized
33813381 // object.
3382 FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1);
3382 FuncInfo.MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, &I);
33833383 }
33843384
33853385 void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
1919 #include "llvm/CodeGen/Passes.h"
2020 #include "llvm/ADT/SmallPtrSet.h"
2121 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/Dominators.h"
2322 #include "llvm/Analysis/ValueTracking.h"
2423 #include "llvm/IR/Attributes.h"
2524 #include "llvm/IR/Constants.h"
4141 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
4242 AU.addRequired();
4343 AU.addPreserved();
44 AU.addPreserved("stack-protector");
4445 FunctionPass::getAnalysisUsage(AU);
4546 }
4647 };
3131
3232 void getAnalysisUsage(AnalysisUsage &AU) const {
3333 AU.addRequired();
34 AU.addPreserved("stack-protector");
3435 AU.addPreserved();
3536 }
3637
2828
2929 void getAnalysisUsage(AnalysisUsage &AU) const {
3030 AU.addRequired();
31 AU.addPreserved("stack-protector");
3132 AU.addPreserved();
3233 }
3334
2525
2626 NVPTXSplitBBatBar() : FunctionPass(ID) {}
2727 void getAnalysisUsage(AnalysisUsage &AU) const {
28 AU.addPreserved("stack-protector");
2829 AU.addPreserved();
2930 }
3031 virtual bool runOnFunction(Function &F);
16951695 const Value *Op1 = I.getArgOperand(0); // The guard's value.
16961696 const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
16971697
1698 MFI.setStackProtectorIndex(FuncInfo.StaticAllocaMap[Slot]);
1699
16981700 // Grab the frame index.
16991701 X86AddressMode AM;
17001702 if (!X86SelectAddress(Slot, AM)) return false;
12451245 unsigned Size = ArgDI->Flags.getByValSize();
12461246 unsigned Align = std::max(StackSlotSize, ArgDI->Flags.getByValAlign());
12471247 // Create a new object on the stack and copy the pointee into it.
1248 int FI = MFI->CreateStackObject(Size, Align, false, false);
1248 int FI = MFI->CreateStackObject(Size, Align, false);
12491249 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
12501250 InVals.push_back(FIN);
12511251 MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
0 ; RUN: llc < %s -disable-fp-elim -march=arm -o - | FileCheck %s
1 ; This test is fairly fragile. The goal is to ensure that "large" stack
2 ; objects are allocated closest to the stack protector (i.e., farthest away
3 ; from the Stack Pointer.) In standard SSP mode this means that large (>=
4 ; ssp-buffer-size) arrays and structures containing such arrays are
5 ; closest to the protector. With sspstrong and sspreq this means large
6 ; arrays/structures-with-arrays are closest, followed by small (< ssp-buffer-size)
7 ; arrays/structures-with-arrays, and then addr-taken variables.
8 ;
9 ; Ideally, we only want to verify that the objects appear in the correct groups
10 ; and that the groups have the correct relative stack offset. The ordering
11 ; within a group is not relevant to this test. Unfortunately, there is not
12 ; an elegant way to do this, so just match the offset for each object.
13
14 %struct.struct_large_char = type { [8 x i8] }
15 %struct.struct_large_char2 = type { [2 x i8], [8 x i8] }
16 %struct.struct_small_char = type { [2 x i8] }
17 %struct.struct_large_nonchar = type { [8 x i32] }
18 %struct.struct_small_nonchar = type { [2 x i16] }
19
20 define void @layout_ssp() ssp {
21 entry:
22 ; Expected stack layout for ssp is
23 ; 180 large_char . Group 1, nested arrays, arrays >= ssp-buffer-size
24 ; 172 struct_large_char .
25 ; 168 scalar1 | Everything else
26 ; 164 scalar2
27 ; 160 scalar3
28 ; 156 addr-of
29 ; 152 small_nonchar (84+68)
30 ; 112 large_nonchar
31 ; 110 small_char
32 ; 108 struct_small_char
33 ; 72 struct_large_nonchar
34 ; 68 struct_small_nonchar
35
36 ; CHECK: layout_ssp:
37 ; r[[SP]] is used as an offset into the stack later
38 ; CHECK: add r[[SP:[0-9]+]], sp, #68
39
40 ; CHECK: bl get_scalar1
41 ; CHECK: str r0, [sp, #168]
42 ; CHECK: bl end_scalar1
43
44 ; CHECK: bl get_scalar2
45 ; CHECK: str r0, [sp, #164]
46 ; CHECK: bl end_scalar2
47
48 ; CHECK: bl get_scalar3
49 ; CHECK: str r0, [sp, #160]
50 ; CHECK: bl end_scalar3
51
52 ; CHECK: bl get_addrof
53 ; CHECK: str r0, [sp, #156]
54 ; CHECK: bl end_addrof
55
56 ; CHECK: get_small_nonchar
57 ; CHECK: strh r0, [r[[SP]], #84]
58 ; CHECK: bl end_small_nonchar
59
60 ; CHECK: bl get_large_nonchar
61 ; CHECK: str r0, [sp, #112]
62 ; CHECK: bl end_large_nonchar
63
64 ; CHECK: bl get_small_char
65 ; CHECK: strb r0, [sp, #110]
66 ; CHECK: bl end_small_char
67
68 ; CHECK: bl get_large_char
69 ; CHECK: strb r0, [sp, #180]
70 ; CHECK: bl end_large_char
71
72 ; CHECK: bl get_struct_large_char
73 ; CHECK: strb r0, [sp, #172]
74 ; CHECK: bl end_struct_large_char
75
76 ; CHECK: bl get_struct_small_char
77 ; CHECK: strb r0, [sp, #108]
78 ; CHECK: bl end_struct_small_char
79
80 ; CHECK: bl get_struct_large_nonchar
81 ; CHECK: str r0, [sp, #72]
82 ; CHECK: bl end_struct_large_nonchar
83
84 ; CHECK: bl get_struct_small_nonchar
85 ; CHECK: strh r0, [r[[SP]]]
86 ; CHECK: bl end_struct_small_nonchar
87 %x = alloca i32, align 4
88 %y = alloca i32, align 4
89 %z = alloca i32, align 4
90 %ptr = alloca i32, align 4
91 %small2 = alloca [2 x i16], align 2
92 %large2 = alloca [8 x i32], align 16
93 %small = alloca [2 x i8], align 1
94 %large = alloca [8 x i8], align 1
95 %a = alloca %struct.struct_large_char, align 1
96 %b = alloca %struct.struct_small_char, align 1
97 %c = alloca %struct.struct_large_nonchar, align 8
98 %d = alloca %struct.struct_small_nonchar, align 2
99 %call = call i32 @get_scalar1()
100 store i32 %call, i32* %x, align 4
101 call void @end_scalar1()
102 %call1 = call i32 @get_scalar2()
103 store i32 %call1, i32* %y, align 4
104 call void @end_scalar2()
105 %call2 = call i32 @get_scalar3()
106 store i32 %call2, i32* %z, align 4
107 call void @end_scalar3()
108 %call3 = call i32 @get_addrof()
109 store i32 %call3, i32* %ptr, align 4
110 call void @end_addrof()
111 %call4 = call signext i16 @get_small_nonchar()
112 %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
113 store i16 %call4, i16* %arrayidx, align 2
114 call void @end_small_nonchar()
115 %call5 = call i32 @get_large_nonchar()
116 %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
117 store i32 %call5, i32* %arrayidx6, align 4
118 call void @end_large_nonchar()
119 %call7 = call signext i8 @get_small_char()
120 %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
121 store i8 %call7, i8* %arrayidx8, align 1
122 call void @end_small_char()
123 %call9 = call signext i8 @get_large_char()
124 %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
125 store i8 %call9, i8* %arrayidx10, align 1
126 call void @end_large_char()
127 %call11 = call signext i8 @get_struct_large_char()
128 %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
129 %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
130 store i8 %call11, i8* %arrayidx12, align 1
131 call void @end_struct_large_char()
132 %call13 = call signext i8 @get_struct_small_char()
133 %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
134 %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
135 store i8 %call13, i8* %arrayidx15, align 1
136 call void @end_struct_small_char()
137 %call16 = call i32 @get_struct_large_nonchar()
138 %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
139 %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
140 store i32 %call16, i32* %arrayidx18, align 4
141 call void @end_struct_large_nonchar()
142 %call19 = call signext i16 @get_struct_small_nonchar()
143 %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
144 %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
145 store i16 %call19, i16* %arrayidx21, align 2
146 call void @end_struct_small_nonchar()
147 %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
148 %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
149 %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
150 %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
151 %0 = load i32* %x, align 4
152 %1 = load i32* %y, align 4
153 %2 = load i32* %z, align 4
154 %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
155 %3 = bitcast [8 x i8]* %coerce.dive to i64*
156 %4 = load i64* %3, align 1
157 %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
158 %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
159 %6 = load i16* %5, align 1
160 %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
161 %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
162 %8 = load i32* %7, align 1
163 call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
164 ret void
165 }
166
167 declare i32 @get_scalar1()
168 declare void @end_scalar1()
169
170 declare i32 @get_scalar2()
171 declare void @end_scalar2()
172
173 declare i32 @get_scalar3()
174 declare void @end_scalar3()
175
176 declare i32 @get_addrof()
177 declare void @end_addrof()
178
179 declare signext i16 @get_small_nonchar()
180 declare void @end_small_nonchar()
181
182 declare i32 @get_large_nonchar()
183 declare void @end_large_nonchar()
184
185 declare signext i8 @get_small_char()
186 declare void @end_small_char()
187
188 declare signext i8 @get_large_char()
189 declare void @end_large_char()
190
191 declare signext i8 @get_struct_large_char()
192 declare void @end_struct_large_char()
193
194 declare signext i8 @get_struct_large_char2()
195 declare void @end_struct_large_char2()
196
197 declare signext i8 @get_struct_small_char()
198 declare void @end_struct_small_char()
199
200 declare i32 @get_struct_large_nonchar()
201 declare void @end_struct_large_nonchar()
202
203 declare signext i16 @get_struct_small_nonchar()
204 declare void @end_struct_small_nonchar()
205
206 declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
0 ; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-pc-linux-gnu -mcpu=corei7 -o - | FileCheck %s
1 ; This test is fairly fragile. The goal is to ensure that "large" stack
2 ; objects are allocated closest to the stack protector (i.e., farthest away
3 ; from the Stack Pointer.) In standard SSP mode this means that large (>=
4 ; ssp-buffer-size) arrays and structures containing such arrays are
5 ; closest to the protector. With sspstrong and sspreq this means large
6 ; arrays/structures-with-arrays are closest, followed by small (< ssp-buffer-size)
7 ; arrays/structures-with-arrays, and then addr-taken variables.
8 ;
9 ; Ideally, we only want to verify that the objects appear in the correct groups
10 ; and that the groups have the correct relative stack offset. The ordering
11 ; within a group is not relevant to this test. Unfortunately, there is not
12 ; an elegant way to do this, so just match the offset for each object.
13 ; RUN: llc < %s -disable-fp-elim -mtriple=x86_64-unknown-unknown -O0 -mcpu=corei7 -o - \
14 ; RUN: | FileCheck --check-prefix=FAST-NON-LIN %s
15 ; FastISel was not setting the StackProtectorIndex when lowering
16 ; Intrinsic::stackprotector and as a result the stack re-arrangement code was
17 ; never applied. This problem only shows up on non-Linux platforms because on
18 ; Linux the stack protector cookie is loaded from a special address space which
19 ; always triggers standard ISel. Run a basic test to ensure that at -O0
20 ; on a non-Linux target the data layout rules are triggered.
21
22 %struct.struct_large_char = type { [8 x i8] }
23 %struct.struct_large_char2 = type { [2 x i8], [8 x i8] }
24 %struct.struct_small_char = type { [2 x i8] }
25 %struct.struct_large_nonchar = type { [8 x i32] }
26 %struct.struct_small_nonchar = type { [2 x i16] }
27
28 define void @layout_ssp() ssp {
29 entry:
30 ; Expected stack layout for ssp is
31 ; -16 large_char . Group 1, nested arrays, arrays >= ssp-buffer-size
32 ; -24 struct_large_char .
33 ; -28 scalar1 | Everything else
34 ; -32 scalar2
35 ; -36 scalar3
36 ; -40 addr-of
37 ; -44 small_nonchar
38 ; -80 large_nonchar
39 ; -82 small_char
40 ; -88 struct_small_char
41 ; -120 struct_large_nonchar
42 ; -128 struct_small_nonchar
43
44 ; CHECK: layout_ssp:
45 ; CHECK: call{{l|q}} get_scalar1
46 ; CHECK: movl %eax, -28(
47 ; CHECK: call{{l|q}} end_scalar1
48
49 ; CHECK: call{{l|q}} get_scalar2
50 ; CHECK: movl %eax, -32(
51 ; CHECK: call{{l|q}} end_scalar2
52
53 ; CHECK: call{{l|q}} get_scalar3
54 ; CHECK: movl %eax, -36(
55 ; CHECK: call{{l|q}} end_scalar3
56
57 ; CHECK: call{{l|q}} get_addrof
58 ; CHECK: movl %eax, -40(
59 ; CHECK: call{{l|q}} end_addrof
60
61 ; CHECK: get_small_nonchar
62 ; CHECK: movw %ax, -44(
63 ; CHECK: call{{l|q}} end_small_nonchar
64
65 ; CHECK: call{{l|q}} get_large_nonchar
66 ; CHECK: movl %eax, -80(
67 ; CHECK: call{{l|q}} end_large_nonchar
68
69 ; CHECK: call{{l|q}} get_small_char
70 ; CHECK: movb %al, -82(
71 ; CHECK: call{{l|q}} end_small_char
72
73 ; CHECK: call{{l|q}} get_large_char
74 ; CHECK: movb %al, -16(
75 ; CHECK: call{{l|q}} end_large_char
76
77 ; CHECK: call{{l|q}} get_struct_large_char
78 ; CHECK: movb %al, -24(
79 ; CHECK: call{{l|q}} end_struct_large_char
80
81 ; CHECK: call{{l|q}} get_struct_small_char
82 ; CHECK: movb %al, -88(
83 ; CHECK: call{{l|q}} end_struct_small_char
84
85 ; CHECK: call{{l|q}} get_struct_large_nonchar
86 ; CHECK: movl %eax, -120(
87 ; CHECK: call{{l|q}} end_struct_large_nonchar
88
89 ; CHECK: call{{l|q}} get_struct_small_nonchar
90 ; CHECK: movw %ax, -128(
91 ; CHECK: call{{l|q}} end_struct_small_nonchar
92 %x = alloca i32, align 4
93 %y = alloca i32, align 4
94 %z = alloca i32, align 4
95 %ptr = alloca i32, align 4
96 %small2 = alloca [2 x i16], align 2
97 %large2 = alloca [8 x i32], align 16
98 %small = alloca [2 x i8], align 1
99 %large = alloca [8 x i8], align 1
100 %a = alloca %struct.struct_large_char, align 1
101 %b = alloca %struct.struct_small_char, align 1
102 %c = alloca %struct.struct_large_nonchar, align 8
103 %d = alloca %struct.struct_small_nonchar, align 2
104 %call = call i32 @get_scalar1()
105 store i32 %call, i32* %x, align 4
106 call void @end_scalar1()
107 %call1 = call i32 @get_scalar2()
108 store i32 %call1, i32* %y, align 4
109 call void @end_scalar2()
110 %call2 = call i32 @get_scalar3()
111 store i32 %call2, i32* %z, align 4
112 call void @end_scalar3()
113 %call3 = call i32 @get_addrof()
114 store i32 %call3, i32* %ptr, align 4
115 call void @end_addrof()
116 %call4 = call signext i16 @get_small_nonchar()
117 %arrayidx = getelementptr inbounds [2 x i16]* %small2, i32 0, i64 0
118 store i16 %call4, i16* %arrayidx, align 2
119 call void @end_small_nonchar()
120 %call5 = call i32 @get_large_nonchar()
121 %arrayidx6 = getelementptr inbounds [8 x i32]* %large2, i32 0, i64 0
122 store i32 %call5, i32* %arrayidx6, align 4
123 call void @end_large_nonchar()
124 %call7 = call signext i8 @get_small_char()
125 %arrayidx8 = getelementptr inbounds [2 x i8]* %small, i32 0, i64 0
126 store i8 %call7, i8* %arrayidx8, align 1
127 call void @end_small_char()
128 %call9 = call signext i8 @get_large_char()
129 %arrayidx10 = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
130 store i8 %call9, i8* %arrayidx10, align 1
131 call void @end_large_char()
132 %call11 = call signext i8 @get_struct_large_char()
133 %foo = getelementptr inbounds %struct.struct_large_char* %a, i32 0, i32 0
134 %arrayidx12 = getelementptr inbounds [8 x i8]* %foo, i32 0, i64 0
135 store i8 %call11, i8* %arrayidx12, align 1
136 call void @end_struct_large_char()
137 %call13 = call signext i8 @get_struct_small_char()
138 %foo14 = getelementptr inbounds %struct.struct_small_char* %b, i32 0, i32 0
139 %arrayidx15 = getelementptr inbounds [2 x i8]* %foo14, i32 0, i64 0
140 store i8 %call13, i8* %arrayidx15, align 1
141 call void @end_struct_small_char()
142 %call16 = call i32 @get_struct_large_nonchar()
143 %foo17 = getelementptr inbounds %struct.struct_large_nonchar* %c, i32 0, i32 0
144 %arrayidx18 = getelementptr inbounds [8 x i32]* %foo17, i32 0, i64 0
145 store i32 %call16, i32* %arrayidx18, align 4
146 call void @end_struct_large_nonchar()
147 %call19 = call signext i16 @get_struct_small_nonchar()
148 %foo20 = getelementptr inbounds %struct.struct_small_nonchar* %d, i32 0, i32 0
149 %arrayidx21 = getelementptr inbounds [2 x i16]* %foo20, i32 0, i64 0
150 store i16 %call19, i16* %arrayidx21, align 2
151 call void @end_struct_small_nonchar()
152 %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
153 %arraydecay22 = getelementptr inbounds [2 x i8]* %small, i32 0, i32 0
154 %arraydecay23 = getelementptr inbounds [8 x i32]* %large2, i32 0, i32 0
155 %arraydecay24 = getelementptr inbounds [2 x i16]* %small2, i32 0, i32 0
156 %0 = load i32* %x, align 4
157 %1 = load i32* %y, align 4
158 %2 = load i32* %z, align 4
159 %coerce.dive = getelementptr %struct.struct_large_char* %a, i32 0, i32 0
160 %3 = bitcast [8 x i8]* %coerce.dive to i64*
161 %4 = load i64* %3, align 1
162 %coerce.dive25 = getelementptr %struct.struct_small_char* %b, i32 0, i32 0
163 %5 = bitcast [2 x i8]* %coerce.dive25 to i16*
164 %6 = load i16* %5, align 1
165 %coerce.dive26 = getelementptr %struct.struct_small_nonchar* %d, i32 0, i32 0
166 %7 = bitcast [2 x i16]* %coerce.dive26 to i32*
167 %8 = load i32* %7, align 1
168 call void @takes_all(i64 %4, i16 %6, %struct.struct_large_nonchar* byval align 8 %c, i32 %8, i8* %arraydecay, i8* %arraydecay22, i32* %arraydecay23, i16* %arraydecay24, i32* %ptr, i32 %0, i32 %1, i32 %2)
169 ret void
170 }
171
172 define void @fast_non_linux() ssp {
173 entry:
174 ; FAST-NON-LIN: fast_non_linux:
175 ; FAST-NON-LIN: call{{l|q}} get_scalar1
176 ; FAST-NON-LIN: movl %eax, -20(
177 ; FAST-NON-LIN: call{{l|q}} end_scalar1
178
179 ; FAST-NON-LIN: call{{l|q}} get_large_char
180 ; FAST-NON-LIN: movb %al, -16(
181 ; FAST-NON-LIN: call{{l|q}} end_large_char
182 %x = alloca i32, align 4
183 %large = alloca [8 x i8], align 1
184 %call = call i32 @get_scalar1()
185 store i32 %call, i32* %x, align 4
186 call void @end_scalar1()
187 %call1 = call signext i8 @get_large_char()
188 %arrayidx = getelementptr inbounds [8 x i8]* %large, i32 0, i64 0
189 store i8 %call1, i8* %arrayidx, align 1
190 call void @end_large_char()
191 %0 = load i32* %x, align 4
192 %arraydecay = getelementptr inbounds [8 x i8]* %large, i32 0, i32 0
193 call void @takes_two(i32 %0, i8* %arraydecay)
194 ret void
195 }
196
197 declare i32 @get_scalar1()
198 declare void @end_scalar1()
199
200 declare i32 @get_scalar2()
201 declare void @end_scalar2()
202
203 declare i32 @get_scalar3()
204 declare void @end_scalar3()
205
206 declare i32 @get_addrof()
207 declare void @end_addrof()
208
209 declare signext i16 @get_small_nonchar()
210 declare void @end_small_nonchar()
211
212 declare i32 @get_large_nonchar()
213 declare void @end_large_nonchar()
214
215 declare signext i8 @get_small_char()
216 declare void @end_small_char()
217
218 declare signext i8 @get_large_char()
219 declare void @end_large_char()
220
221 declare signext i8 @get_struct_large_char()
222 declare void @end_struct_large_char()
223
224 declare signext i8 @get_struct_large_char2()
225 declare void @end_struct_large_char2()
226
227 declare signext i8 @get_struct_small_char()
228 declare void @end_struct_small_char()
229
230 declare i32 @get_struct_large_nonchar()
231 declare void @end_struct_large_nonchar()
232
233 declare signext i16 @get_struct_small_nonchar()
234 declare void @end_struct_small_nonchar()
235
236 declare void @takes_all(i64, i16, %struct.struct_large_nonchar* byval align 8, i32, i8*, i8*, i32*, i16*, i32*, i32, i32, i32)
237 declare void @takes_two(i32, i8*)